repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_tomek_links.py | """Test the module Tomek's links."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import TomekLinks
X = np.array(
[
[0.31230513, 0.1216318],
[0.68481731, 0.51935141],
[1.34192108, -0.13367336],
[0.62366841, -0.21312976],
[1.61091956, -0.40283504],
[-0.37162401, -2.19400981],
[0.74680821, 1.63827342],
[0.2184254, 0.24299982],
[0.61472253, -0.82309052],
[0.19893132, -0.47761769],
[1.06514042, -0.0770537],
[0.97407872, 0.44454207],
[1.40301027, -0.83648734],
[-1.20515198, -1.02689695],
[-0.27410027, -0.54194484],
[0.8381014, 0.44085498],
[-0.23374509, 0.18370049],
[-0.32635887, -0.29299653],
[-0.00288378, 0.84259929],
[1.79580611, -0.02219234],
]
)
Y = np.array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
def test_tl_init():
    """A default-constructed TomekLinks runs single-threaded (n_jobs=None)."""
    sampler = TomekLinks()
    assert sampler.n_jobs is None
def test_tl_fit_resample():
    """TomekLinks drops majority-class samples that form a Tomek link.

    With the module-level fixture (20 samples, class 1 in majority), three
    class-1 samples take part in a Tomek link and are removed; the exact
    17-sample output is pinned as hard-coded ground truth below.
    """
    tl = TomekLinks()
    X_resampled, y_resampled = tl.fit_resample(X, Y)
    # Expected output: the original X/Y minus the three removed rows.
    X_gt = np.array(
        [
            [0.31230513, 0.1216318],
            [0.68481731, 0.51935141],
            [1.34192108, -0.13367336],
            [0.62366841, -0.21312976],
            [1.61091956, -0.40283504],
            [-0.37162401, -2.19400981],
            [0.74680821, 1.63827342],
            [0.2184254, 0.24299982],
            [0.61472253, -0.82309052],
            [0.19893132, -0.47761769],
            [0.97407872, 0.44454207],
            [1.40301027, -0.83648734],
            [-1.20515198, -1.02689695],
            [-0.23374509, 0.18370049],
            [-0.32635887, -0.29299653],
            [-0.00288378, 0.84259929],
            [1.79580611, -0.02219234],
        ]
    )
    y_gt = np.array([1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0])
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
@pytest.mark.parametrize(
    "sampling_strategy", ["auto", "majority", "not minority", "not majority", "all"]
)
def test_tomek_links_strings(sampling_strategy):
    """Every documented `sampling_strategy` string is accepted by a sampler
    inheriting from `BaseCleaningSampler`."""
    X, y = make_classification(
        n_samples=100,
        n_clusters_per_class=1,
        n_classes=3,
        weights=[0.1, 0.3, 0.6],
        random_state=0,
    )
    sampler = TomekLinks(sampling_strategy=sampling_strategy)
    # Should not raise for any of the supported strings.
    sampler.fit_resample(X, y)
| 2,698 | 28.988889 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_random_under_sampler.py | """Test the module random under sampler."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
from datetime import datetime
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import RandomUnderSampler
RND_SEED = 0
X = np.array(
[
[0.04352327, -0.20515826],
[0.92923648, 0.76103773],
[0.20792588, 1.49407907],
[0.47104475, 0.44386323],
[0.22950086, 0.33367433],
[0.15490546, 0.3130677],
[0.09125309, -0.85409574],
[0.12372842, 0.6536186],
[0.13347175, 0.12167502],
[0.094035, -2.55298982],
]
)
Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
@pytest.mark.parametrize("as_frame", [True, False], ids=["dataframe", "array"])
def test_rus_fit_resample(as_frame):
    """Check RandomUnderSampler on both ndarray and DataFrame input.

    With ``replacement=True`` and a fixed seed, the exact resampled rows are
    deterministic and compared against hard-coded ground truth.
    """
    if as_frame:
        pd = pytest.importorskip("pandas")
        X_ = pd.DataFrame(X)
    else:
        X_ = X
    rus = RandomUnderSampler(random_state=RND_SEED, replacement=True)
    X_resampled, y_resampled = rus.fit_resample(X_, Y)
    X_gt = np.array(
        [
            [0.92923648, 0.76103773],
            [0.47104475, 0.44386323],
            [0.13347175, 0.12167502],
            [0.09125309, -0.85409574],
            [0.12372842, 0.6536186],
            [0.04352327, -0.20515826],
        ]
    )
    y_gt = np.array([0, 0, 0, 1, 1, 1])
    if as_frame:
        # DataFrame in -> DataFrame out; convert for the numeric comparison.
        assert hasattr(X_resampled, "loc")
        X_resampled = X_resampled.to_numpy()
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def test_rus_fit_resample_half():
    """A dict `sampling_strategy` keeps the requested number of samples per class.

    ``{0: 3, 1: 6}`` with ``replacement=True`` may draw the same sample more
    than once, which the duplicated rows in the ground truth reflect.
    """
    sampling_strategy = {0: 3, 1: 6}
    rus = RandomUnderSampler(
        sampling_strategy=sampling_strategy,
        random_state=RND_SEED,
        replacement=True,
    )
    X_resampled, y_resampled = rus.fit_resample(X, Y)
    # Note the repeated rows below: sampling with replacement is expected.
    X_gt = np.array(
        [
            [0.92923648, 0.76103773],
            [0.47104475, 0.44386323],
            [0.92923648, 0.76103773],
            [0.15490546, 0.3130677],
            [0.15490546, 0.3130677],
            [0.15490546, 0.3130677],
            [0.20792588, 1.49407907],
            [0.15490546, 0.3130677],
            [0.12372842, 0.6536186],
        ]
    )
    y_gt = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def test_multiclass_fit_resample():
    """Multi-class targets are balanced down to the minority-class size."""
    y = Y.copy()
    y[5], y[6] = 2, 2  # carve a third class out of two samples
    rus = RandomUnderSampler(random_state=RND_SEED)
    X_resampled, y_resampled = rus.fit_resample(X, y)
    # Class 2 has only two members, so every class ends up with two samples.
    counts = Counter(y_resampled)
    assert counts[0] == 2
    assert counts[1] == 2
    assert counts[2] == 2
def test_random_under_sampling_heterogeneous_data():
    """Object-dtype (mixed-type) feature arrays are resampled unchanged."""
    X_hetero = np.array(
        [["xxx", 1, 1.0], ["yyy", 2, 2.0], ["zzz", 3, 3.0]], dtype=object
    )
    y = np.array([0, 0, 1])
    sampler = RandomUnderSampler(random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X_hetero, y)
    # One sample per class is kept and the dtype is not converted.
    assert X_res.shape[0] == 2
    assert y_res.shape[0] == 2
    assert X_res.dtype == object
def test_random_under_sampling_nan_inf():
    """Resampling must tolerate NaN/inf feature values.

    Non-regression test for #605: the sampler only selects rows and should
    not validate (or choke on) non-finite feature values.
    """
    rng = np.random.RandomState(42)
    n_not_finite = X.shape[0] // 3
    # Keep the RNG call order identical so the corrupted cells are the same.
    rows = rng.choice(np.arange(X.shape[0]), size=n_not_finite)
    cols = rng.randint(0, X.shape[1], size=n_not_finite)
    bad_values = rng.choice([np.nan, np.inf], size=n_not_finite)
    X_ = X.copy()
    X_[rows, cols] = bad_values
    X_res, y_res = RandomUnderSampler(random_state=0).fit_resample(X_, Y)
    assert y_res.shape == (6,)
    assert X_res.shape == (6, 2)
    # The non-finite values must survive into the output untouched.
    assert np.any(~np.isfinite(X_res))
@pytest.mark.parametrize(
    "sampling_strategy", ["auto", "majority", "not minority", "not majority", "all"]
)
def test_random_under_sampler_strings(sampling_strategy):
    """Every documented `sampling_strategy` string is accepted by a sampler
    inheriting from `BaseUnderSampler`."""
    X, y = make_classification(
        n_samples=100,
        n_clusters_per_class=1,
        n_classes=3,
        weights=[0.1, 0.3, 0.6],
        random_state=0,
    )
    sampler = RandomUnderSampler(sampling_strategy=sampling_strategy)
    # Should not raise for any of the supported strings.
    sampler.fit_resample(X, y)
def test_random_under_sampling_datetime():
    """Pandas dtypes (incl. datetime) pass through resampling untouched."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"label": [0, 0, 0, 1], "td": [datetime.now()] * 4})
    y = X["label"]
    sampler = RandomUnderSampler(random_state=0)
    X_res, y_res = sampler.fit_resample(X, y)
    # No dtype conversion, aligned row indexes, and one sample kept per class.
    pd.testing.assert_series_equal(X_res.dtypes, X.dtypes)
    pd.testing.assert_index_equal(X_res.index, y_res.index)
    assert_array_equal(y_res.to_numpy(), np.array([0, 1]))
| 4,992 | 29.260606 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_allknn.py | """Test the module repeated edited nearest neighbour."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.under_sampling import AllKNN
X = np.array(
[
[-0.12840393, 0.66446571],
[1.32319756, -0.13181616],
[0.04296502, -0.37981873],
[0.83631853, 0.18569783],
[1.02956816, 0.36061601],
[1.12202806, 0.33811558],
[-0.53171468, -0.53735182],
[1.3381556, 0.35956356],
[-0.35946678, 0.72510189],
[1.32326943, 0.28393874],
[2.94290565, -0.13986434],
[0.28294738, -1.00125525],
[0.34218094, -0.58781961],
[-0.88864036, -0.33782387],
[-1.10146139, 0.91782682],
[-0.7969716, -0.50493969],
[0.73489726, 0.43915195],
[0.2096964, -0.61814058],
[-0.28479268, 0.70459548],
[1.84864913, 0.14729596],
[1.59068979, -0.96622933],
[0.73418199, -0.02222847],
[0.50307437, 0.498805],
[0.84929742, 0.41042894],
[0.62649535, 0.46600596],
[0.79270821, -0.41386668],
[1.16606871, -0.25641059],
[1.57356906, 0.30390519],
[1.0304995, -0.16955962],
[1.67314371, 0.19231498],
[0.98382284, 0.37184502],
[0.48921682, -1.38504507],
[-0.46226554, -0.50481004],
[-0.03918551, -0.68540745],
[0.24991051, -1.00864997],
[0.80541964, -0.34465185],
[0.1732627, -1.61323172],
[0.69804044, 0.44810796],
[-0.5506368, -0.42072426],
[-0.34474418, 0.21969797],
]
)
Y = np.array(
[
1,
2,
2,
2,
1,
1,
0,
2,
1,
1,
1,
2,
2,
0,
1,
2,
1,
2,
1,
1,
2,
2,
1,
1,
1,
2,
2,
2,
2,
1,
1,
2,
0,
2,
2,
2,
2,
1,
2,
0,
]
)
R_TOL = 1e-4
def test_allknn_fit_resample():
    """AllKNN with default settings reproduces hard-coded ground truth."""
    X_resampled, y_resampled = AllKNN().fit_resample(X, Y)
    # 27 surviving samples, grouped by class: 4 of class 0, 9 of class 1,
    # 14 of class 2.
    X_gt = np.array(
        [
            [-0.53171468, -0.53735182], [-0.88864036, -0.33782387],
            [-0.46226554, -0.50481004], [-0.34474418, 0.21969797],
            [1.02956816, 0.36061601], [1.12202806, 0.33811558],
            [-1.10146139, 0.91782682], [0.73489726, 0.43915195],
            [0.50307437, 0.498805], [0.84929742, 0.41042894],
            [0.62649535, 0.46600596], [0.98382284, 0.37184502],
            [0.69804044, 0.44810796], [0.04296502, -0.37981873],
            [0.28294738, -1.00125525], [0.34218094, -0.58781961],
            [0.2096964, -0.61814058], [1.59068979, -0.96622933],
            [0.73418199, -0.02222847], [0.79270821, -0.41386668],
            [1.16606871, -0.25641059], [1.0304995, -0.16955962],
            [0.48921682, -1.38504507], [-0.03918551, -0.68540745],
            [0.24991051, -1.00864997], [0.80541964, -0.34465185],
            [0.1732627, -1.61323172],
        ]
    )
    y_gt = np.array([0] * 4 + [1] * 9 + [2] * 14)
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_allclose(y_resampled, y_gt, rtol=R_TOL)
def test_all_knn_allow_minority():
    """`allow_minority=True` must remove strictly more samples than default."""
    X, y = make_classification(
        n_samples=10000,
        n_features=2,
        n_informative=2,
        n_redundant=0,
        n_repeated=0,
        n_classes=3,
        n_clusters_per_class=1,
        weights=[0.2, 0.3, 0.5],
        class_sep=0.4,
        random_state=0,
    )
    _, y_with_minority = AllKNN(allow_minority=True).fit_resample(X, y)
    _, y_default = AllKNN().fit_resample(X, y)
    # Allowing edition of the minority class can only shrink the output.
    assert len(y_with_minority) < len(y_default)
def test_allknn_fit_resample_mode():
    """AllKNN with `kind_sel='mode'` keeps more samples than the default.

    The exact 31-sample output is pinned as hard-coded ground truth:
    4 samples of class 0, 12 of class 1 and 15 of class 2.
    """
    allknn = AllKNN(kind_sel="mode")
    X_resampled, y_resampled = allknn.fit_resample(X, Y)
    X_gt = np.array(
        [
            [-0.53171468, -0.53735182], [-0.88864036, -0.33782387],
            [-0.46226554, -0.50481004], [-0.34474418, 0.21969797],
            [-0.12840393, 0.66446571], [1.02956816, 0.36061601],
            [1.12202806, 0.33811558], [-0.35946678, 0.72510189],
            [-1.10146139, 0.91782682], [0.73489726, 0.43915195],
            [-0.28479268, 0.70459548], [0.50307437, 0.498805],
            [0.84929742, 0.41042894], [0.62649535, 0.46600596],
            [0.98382284, 0.37184502], [0.69804044, 0.44810796],
            [1.32319756, -0.13181616], [0.04296502, -0.37981873],
            [0.28294738, -1.00125525], [0.34218094, -0.58781961],
            [0.2096964, -0.61814058], [1.59068979, -0.96622933],
            [0.73418199, -0.02222847], [0.79270821, -0.41386668],
            [1.16606871, -0.25641059], [1.0304995, -0.16955962],
            [0.48921682, -1.38504507], [-0.03918551, -0.68540745],
            [0.24991051, -1.00864997], [0.80541964, -0.34465185],
            [0.1732627, -1.61323172],
        ]
    )
    y_gt = np.array([0] * 4 + [1] * 12 + [2] * 15)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def test_allknn_fit_resample_with_nn_object():
    """Passing a NearestNeighbors instance behaves like `kind_sel='mode'`.

    The ground truth is the same 31-sample output as the `kind_sel='mode'`
    test: 4 samples of class 0, 12 of class 1 and 15 of class 2.
    """
    neighbors = NearestNeighbors(n_neighbors=4)
    allknn = AllKNN(n_neighbors=neighbors, kind_sel="mode")
    X_resampled, y_resampled = allknn.fit_resample(X, Y)
    X_gt = np.array(
        [
            [-0.53171468, -0.53735182], [-0.88864036, -0.33782387],
            [-0.46226554, -0.50481004], [-0.34474418, 0.21969797],
            [-0.12840393, 0.66446571], [1.02956816, 0.36061601],
            [1.12202806, 0.33811558], [-0.35946678, 0.72510189],
            [-1.10146139, 0.91782682], [0.73489726, 0.43915195],
            [-0.28479268, 0.70459548], [0.50307437, 0.498805],
            [0.84929742, 0.41042894], [0.62649535, 0.46600596],
            [0.98382284, 0.37184502], [0.69804044, 0.44810796],
            [1.32319756, -0.13181616], [0.04296502, -0.37981873],
            [0.28294738, -1.00125525], [0.34218094, -0.58781961],
            [0.2096964, -0.61814058], [1.59068979, -0.96622933],
            [0.73418199, -0.02222847], [0.79270821, -0.41386668],
            [1.16606871, -0.25641059], [1.0304995, -0.16955962],
            [0.48921682, -1.38504507], [-0.03918551, -0.68540745],
            [0.24991051, -1.00864997], [0.80541964, -0.34465185],
            [0.1732627, -1.61323172],
        ]
    )
    y_gt = np.array([0] * 4 + [1] * 12 + [2] * 15)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def test_alknn_not_good_object():
    """A bogus `n_neighbors` argument must be rejected at resampling time."""
    allknn = AllKNN(n_neighbors="rnd", kind_sel="mode")
    with pytest.raises(ValueError):
        allknn.fit_resample(X, Y)
| 8,773 | 23.50838 | 70 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_condensed_nearest_neighbour.py | """Test the module condensed nearest neighbour."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import CondensedNearestNeighbour
RND_SEED = 0
X = np.array(
[
[2.59928271, 0.93323465],
[0.25738379, 0.95564169],
[1.42772181, 0.526027],
[1.92365863, 0.82718767],
[-0.10903849, -0.12085181],
[-0.284881, -0.62730973],
[0.57062627, 1.19528323],
[0.03394306, 0.03986753],
[0.78318102, 2.59153329],
[0.35831463, 1.33483198],
[-0.14313184, -1.0412815],
[0.01936241, 0.17799828],
[-1.25020462, -0.40402054],
[-0.09816301, -0.74662486],
[-0.01252787, 0.34102657],
[0.52726792, -0.38735648],
[0.2821046, -0.07862747],
[0.05230552, 0.09043907],
[0.15198585, 0.12512646],
[0.70524765, 0.39816382],
]
)
Y = np.array([1, 2, 1, 1, 0, 2, 2, 2, 2, 2, 2, 0, 1, 2, 2, 2, 2, 1, 2, 1])
def test_cnn_init():
    """Default CondensedNearestNeighbour: one seed sample, serial execution."""
    sampler = CondensedNearestNeighbour(random_state=RND_SEED)
    assert sampler.n_seeds_S == 1
    assert sampler.n_jobs is None
def test_cnn_fit_resample():
    """CNN with a fixed seed reproduces hard-coded ground truth.

    The condensed set keeps 2 samples of class 0, 3 of class 1 and 5 of
    class 2.
    """
    cnn = CondensedNearestNeighbour(random_state=RND_SEED)
    X_resampled, y_resampled = cnn.fit_resample(X, Y)
    X_gt = np.array(
        [
            [-0.10903849, -0.12085181], [0.01936241, 0.17799828],
            [0.05230552, 0.09043907], [-1.25020462, -0.40402054],
            [0.70524765, 0.39816382], [0.35831463, 1.33483198],
            [-0.284881, -0.62730973], [0.03394306, 0.03986753],
            [-0.01252787, 0.34102657], [0.15198585, 0.12512646],
        ]
    )
    y_gt = np.array([0] * 2 + [1] * 3 + [2] * 5)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
@pytest.mark.parametrize("n_neighbors", [1, KNeighborsClassifier(n_neighbors=1)])
def test_cnn_fit_resample_with_object(n_neighbors):
    """`n_neighbors` may be an int or a classifier; both give the same result."""
    cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=n_neighbors)
    X_resampled, y_resampled = cnn.fit_resample(X, Y)
    X_gt = np.array(
        [
            [-0.10903849, -0.12085181], [0.01936241, 0.17799828],
            [0.05230552, 0.09043907], [-1.25020462, -0.40402054],
            [0.70524765, 0.39816382], [0.35831463, 1.33483198],
            [-0.284881, -0.62730973], [0.03394306, 0.03986753],
            [-0.01252787, 0.34102657], [0.15198585, 0.12512646],
        ]
    )
    y_gt = np.array([0] * 2 + [1] * 3 + [2] * 5)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
    # Re-run with an explicit integer to double-check the int code path
    # against the same ground truth.
    cnn = CondensedNearestNeighbour(random_state=RND_SEED, n_neighbors=1)
    X_resampled, y_resampled = cnn.fit_resample(X, Y)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def test_condensed_nearest_neighbour_multiclass():
    """One binary (minority vs. other) estimator is fitted per other class."""
    X, y = make_classification(
        n_samples=1_000,
        n_classes=4,
        weights=[0.1, 0.2, 0.2, 0.5],
        n_clusters_per_class=1,
        random_state=0,
    )
    cnn = CondensedNearestNeighbour(random_state=RND_SEED)
    cnn.fit_resample(X, y)
    assert len(cnn.estimators_) == len(cnn.sampling_strategy_)
    seen = []
    for estimator in cnn.estimators_:
        # Each estimator opposes the minority class (0) to one other class.
        assert estimator.classes_[0] == 0
        assert estimator.classes_[1] in {1, 2, 3}
        seen.append(estimator.classes_[1])
    # Every "other" class must be paired exactly once.
    assert len(set(seen)) == len(seen)
# TODO: remove in 0.14
def test_condensed_nearest_neighbors_deprecation():
    """Accessing the deprecated `estimator_` attribute raises a FutureWarning."""
    cnn = CondensedNearestNeighbour(random_state=RND_SEED)
    cnn.fit_resample(X, Y)
    expected = "`estimator_` attribute has been deprecated"
    with pytest.warns(FutureWarning, match=expected):
        cnn.estimator_
| 4,227 | 31.523077 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_one_sided_selection.py | """Test the module one-sided selection."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import OneSidedSelection
RND_SEED = 0
X = np.array(
[
[-0.3879569, 0.6894251],
[-0.09322739, 1.28177189],
[-0.77740357, 0.74097941],
[0.91542919, -0.65453327],
[-0.03852113, 0.40910479],
[-0.43877303, 1.07366684],
[-0.85795321, 0.82980738],
[-0.18430329, 0.52328473],
[-0.30126957, -0.66268378],
[-0.65571327, 0.42412021],
[-0.28305528, 0.30284991],
[0.20246714, -0.34727125],
[1.06446472, -1.09279772],
[0.30543283, -0.02589502],
[-0.00717161, 0.00318087],
]
)
Y = np.array([0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0])
def test_oss_init():
    """Constructor arguments are stored untouched on the instance."""
    sampler = OneSidedSelection(random_state=RND_SEED)
    assert sampler.n_seeds_S == 1
    assert sampler.n_jobs is None
    assert sampler.random_state == RND_SEED
def test_oss_fit_resample():
    """OneSidedSelection with a fixed seed reproduces hard-coded ground truth.

    12 of the 15 samples are kept: all 6 of class 0 followed by 6 of class 1.
    """
    oss = OneSidedSelection(random_state=RND_SEED)
    X_resampled, y_resampled = oss.fit_resample(X, Y)
    X_gt = np.array(
        [
            [-0.3879569, 0.6894251], [0.91542919, -0.65453327],
            [-0.65571327, 0.42412021], [1.06446472, -1.09279772],
            [0.30543283, -0.02589502], [-0.00717161, 0.00318087],
            [-0.09322739, 1.28177189], [-0.77740357, 0.74097941],
            [-0.43877303, 1.07366684], [-0.85795321, 0.82980738],
            [-0.30126957, -0.66268378], [0.20246714, -0.34727125],
        ]
    )
    y_gt = np.array([0] * 6 + [1] * 6)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
@pytest.mark.parametrize("n_neighbors", [1, KNeighborsClassifier(n_neighbors=1)])
def test_oss_with_object(n_neighbors):
    """`n_neighbors` may be an int or a classifier; both give the same result."""
    oss = OneSidedSelection(random_state=RND_SEED, n_neighbors=n_neighbors)
    X_resampled, y_resampled = oss.fit_resample(X, Y)
    X_gt = np.array(
        [
            [-0.3879569, 0.6894251], [0.91542919, -0.65453327],
            [-0.65571327, 0.42412021], [1.06446472, -1.09279772],
            [0.30543283, -0.02589502], [-0.00717161, 0.00318087],
            [-0.09322739, 1.28177189], [-0.77740357, 0.74097941],
            [-0.43877303, 1.07366684], [-0.85795321, 0.82980738],
            [-0.30126957, -0.66268378], [0.20246714, -0.34727125],
        ]
    )
    y_gt = np.array([0] * 6 + [1] * 6)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
    # Re-run with an explicit integer to double-check the int code path
    # against the same ground truth.
    oss = OneSidedSelection(random_state=RND_SEED, n_neighbors=1)
    X_resampled, y_resampled = oss.fit_resample(X, Y)
    assert_array_equal(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
def test_one_sided_selection_multiclass():
    """One binary (minority vs. other) estimator is fitted per other class."""
    X, y = make_classification(
        n_samples=1_000,
        n_classes=4,
        weights=[0.1, 0.2, 0.2, 0.5],
        n_clusters_per_class=1,
        random_state=0,
    )
    oss = OneSidedSelection(random_state=RND_SEED)
    oss.fit_resample(X, y)
    assert len(oss.estimators_) == len(oss.sampling_strategy_)
    seen = []
    for estimator in oss.estimators_:
        # Each estimator opposes the minority class (0) to one other class.
        assert estimator.classes_[0] == 0
        assert estimator.classes_[1] in {1, 2, 3}
        seen.append(estimator.classes_[1])
    # Every "other" class must be paired exactly once.
    assert len(set(seen)) == len(seen)
# TODO: remove in 0.14
def test_one_sided_selection_deprecation():
    """Accessing the deprecated `estimator_` attribute raises a FutureWarning."""
    oss = OneSidedSelection(random_state=RND_SEED)
    oss.fit_resample(X, Y)
    expected = "`estimator_` attribute has been deprecated"
    with pytest.warns(FutureWarning, match=expected):
        oss.estimator_
| 4,185 | 31.2 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_selection/tests/test_neighbourhood_cleaning_rule.py | """Test the module neighbourhood cleaning rule."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.utils._testing import assert_array_equal
from imblearn.under_sampling import EditedNearestNeighbours, NeighbourhoodCleaningRule
@pytest.fixture(scope="module")
def data():
    """Imbalanced 3-class toy dataset shared by the tests in this module."""
    return make_classification(
        n_samples=200,
        n_features=2,
        n_informative=2,
        n_redundant=0,
        n_repeated=0,
        n_clusters_per_class=1,
        n_classes=3,
        weights=[0.1, 0.3, 0.6],
        random_state=0,
    )
def test_ncr_threshold_cleaning(data):
    """Test the effect of the `threshold_cleaning` parameter."""
    X, y = data
    # with a large `threshold_cleaning`, the algorithm is equivalent to ENN
    # (no class qualifies for the extra cleaning phase)
    enn = EditedNearestNeighbours()
    ncr = NeighbourhoodCleaningRule(
        edited_nearest_neighbours=enn, n_neighbors=10, threshold_cleaning=10
    )
    enn.fit_resample(X, y)
    ncr.fit_resample(X, y)
    assert_array_equal(np.sort(enn.sample_indices_), np.sort(ncr.sample_indices_))
    assert ncr.classes_to_clean_ == []
    # set a threshold that we should consider only the class #2
    # (the threshold is a class-size ratio relative to the minority class)
    counter = Counter(y)
    threshold = counter[1] / counter[0]
    ncr.set_params(threshold_cleaning=threshold)
    ncr.fit_resample(X, y)
    assert set(ncr.classes_to_clean_) == {2}
    # making the threshold slightly smaller to take into account class #1
    # (epsilon below the exact ratio flips the comparison for class #1)
    ncr.set_params(threshold_cleaning=threshold - np.finfo(np.float32).eps)
    ncr.fit_resample(X, y)
    assert set(ncr.classes_to_clean_) == {1, 2}
def test_ncr_n_neighbors(data):
    """A larger `n_neighbors` makes the second cleaning phase more aggressive."""
    X, y = data
    ncr = NeighbourhoodCleaningRule(
        edited_nearest_neighbours=EditedNearestNeighbours(), n_neighbors=3
    )
    ncr.fit_resample(X, y)
    kept_with_3_nn = ncr.sample_indices_
    ncr.set_params(n_neighbors=10).fit_resample(X, y)
    kept_with_10_nn = ncr.sample_indices_
    # More neighbours -> more samples flagged for removal.
    assert len(kept_with_3_nn) > len(kept_with_10_nn)
# TODO: remove in 0.14
@pytest.mark.parametrize("kind_sel", ["all", "mode"])
def test_ncr_deprecate_kind_sel(data, kind_sel):
    """Passing the deprecated `kind_sel` parameter raises a FutureWarning."""
    X, y = data
    ncr = NeighbourhoodCleaningRule(kind_sel=kind_sel)
    with pytest.warns(FutureWarning, match="`kind_sel` is deprecated"):
        ncr.fit_resample(X, y)
| 2,574 | 29.294118 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_generation/__init__.py | """
The :mod:`imblearn.under_sampling.prototype_generation` submodule contains
methods that generate new samples in order to balance the dataset.
"""
from ._cluster_centroids import ClusterCentroids
__all__ = ["ClusterCentroids"]
| 232 | 24.888889 | 74 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_generation/_cluster_centroids.py | """Class to perform under-sampling by generating centroids based on
clustering."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# License: MIT
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import _safe_indexing
from ...utils import Substitution
from ...utils._docstring import _random_state_docstring
from ...utils._param_validation import HasMethods, StrOptions
from ..base import BaseUnderSampler
VOTING_KIND = ("auto", "hard", "soft")
@Substitution(
    sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
    random_state=_random_state_docstring,
)
class ClusterCentroids(BaseUnderSampler):
    """Undersample by generating centroids based on clustering methods.

    Method that under samples the majority class by replacing a
    cluster of majority samples by the cluster centroid of a KMeans
    algorithm. This algorithm keeps N majority samples by fitting the
    KMeans algorithm with N cluster to the majority class and using
    the coordinates of the N cluster centroids as the new majority
    samples.

    Read more in the :ref:`User Guide <cluster_centroids>`.

    Parameters
    ----------
    {sampling_strategy}

    {random_state}

    estimator : estimator object, default=None
        A scikit-learn compatible clustering method that exposes a `n_clusters`
        parameter and a `cluster_centers_` fitted attribute. By default, it will
        be a default :class:`~sklearn.cluster.KMeans` estimator.

    voting : {{"hard", "soft", "auto"}}, default='auto'
        Voting strategy to generate the new samples:

        - If ``'hard'``, the nearest-neighbors of the centroids found using the
          clustering algorithm will be used.
        - If ``'soft'``, the centroids found by the clustering algorithm will
          be used.
        - If ``'auto'``, if the input is sparse, it will default on ``'hard'``
          otherwise, ``'soft'`` will be used.

        .. versionadded:: 0.3.0

    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.

    estimator_ : estimator object
        The validated estimator created from the `estimator` parameter.

    voting_ : str
        The validated voting strategy.

    n_features_in_ : int
        Number of features in the input dataset.

        .. versionadded:: 0.9

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.

        .. versionadded:: 0.10

    See Also
    --------
    EditedNearestNeighbours : Under-sampling by editing samples.

    CondensedNearestNeighbour: Under-sampling by condensing samples.

    Notes
    -----
    Supports multi-class resampling by sampling each class independently.

    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.cluster import MiniBatchKMeans
    >>> from imblearn.under_sampling import ClusterCentroids
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> cc = ClusterCentroids(
    ...     estimator=MiniBatchKMeans(n_init=1, random_state=0), random_state=42
    ... )
    >>> X_res, y_res = cc.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{...}})
    """

    _parameter_constraints: dict = {
        **BaseUnderSampler._parameter_constraints,
        "estimator": [HasMethods(["fit", "predict"]), None],
        "voting": [StrOptions({"auto", "hard", "soft"})],
        "random_state": ["random_state"],
    }

    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        estimator=None,
        voting="auto",
    ):
        super().__init__(sampling_strategy=sampling_strategy)
        self.random_state = random_state
        self.estimator = estimator
        self.voting = voting

    def _validate_estimator(self):
        """Private function to create the KMeans estimator"""
        if self.estimator is None:
            self.estimator_ = KMeans(random_state=self.random_state)
        else:
            self.estimator_ = clone(self.estimator)
            # `n_clusters` must be settable so `_fit_resample` can request the
            # exact number of centroids per class.
            if "n_clusters" not in self.estimator_.get_params():
                raise ValueError(
                    "`estimator` should be a clustering estimator exposing a parameter"
                    " `n_clusters` and a fitted parameter `cluster_centers_`."
                )

    def _generate_sample(self, X, y, centroids, target_class):
        # "hard" voting snaps each centroid back to its nearest real sample so
        # the output only contains observed data points; "soft" voting keeps
        # the synthetic centroids as-is.
        if self.voting_ == "hard":
            nearest_neighbors = NearestNeighbors(n_neighbors=1)
            nearest_neighbors.fit(X, y)
            indices = nearest_neighbors.kneighbors(centroids, return_distance=False)
            X_new = _safe_indexing(X, np.squeeze(indices))
        else:
            if sparse.issparse(X):
                X_new = sparse.csr_matrix(centroids, dtype=X.dtype)
            else:
                X_new = centroids
        y_new = np.array([target_class] * centroids.shape[0], dtype=y.dtype)
        return X_new, y_new

    def _fit_resample(self, X, y):
        self._validate_estimator()

        # Resolve the "auto" voting strategy from the input sparsity.
        if self.voting == "auto":
            self.voting_ = "hard" if sparse.issparse(X) else "soft"
        else:
            self.voting_ = self.voting

        X_resampled, y_resampled = [], []
        for target_class in np.unique(y):
            target_class_indices = np.flatnonzero(y == target_class)
            if target_class in self.sampling_strategy_.keys():
                # Cluster this class into exactly the requested number of
                # samples and replace it by the resulting centroids.
                n_samples = self.sampling_strategy_[target_class]
                self.estimator_.set_params(**{"n_clusters": n_samples})
                self.estimator_.fit(_safe_indexing(X, target_class_indices))
                if not hasattr(self.estimator_, "cluster_centers_"):
                    raise RuntimeError(
                        "`estimator` should be a clustering estimator exposing a "
                        "fitted parameter `cluster_centers_`."
                    )
                X_new, y_new = self._generate_sample(
                    _safe_indexing(X, target_class_indices),
                    _safe_indexing(y, target_class_indices),
                    self.estimator_.cluster_centers_,
                    target_class,
                )
                X_resampled.append(X_new)
                y_resampled.append(y_new)
            else:
                # Classes not targeted by the sampling strategy pass through.
                X_resampled.append(_safe_indexing(X, target_class_indices))
                y_resampled.append(_safe_indexing(y, target_class_indices))

        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)

        return X_resampled, np.array(y_resampled, dtype=y.dtype)

    def _more_tags(self):
        # Centroids are synthetic, so no original-sample indices can be exposed.
        return {"sample_indices": False}
| 7,528 | 35.548544 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_generation/tests/test_cluster_centroids.py | """Test the module cluster centroids."""
from collections import Counter
import numpy as np
import pytest
from scipy import sparse
from sklearn.cluster import KMeans
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from imblearn.under_sampling import ClusterCentroids
from imblearn.utils.testing import _CustomClusterer
RND_SEED = 0
X = np.array(
[
[0.04352327, -0.20515826],
[0.92923648, 0.76103773],
[0.20792588, 1.49407907],
[0.47104475, 0.44386323],
[0.22950086, 0.33367433],
[0.15490546, 0.3130677],
[0.09125309, -0.85409574],
[0.12372842, 0.6536186],
[0.13347175, 0.12167502],
[0.094035, -2.55298982],
]
)
Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
R_TOL = 1e-4
@pytest.mark.parametrize(
    "X, expected_voting", [(X, "soft"), (sparse.csr_matrix(X), "hard")]
)
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_fit_resample_check_voting(X, expected_voting):
    """voting='auto' resolves to 'soft' for dense and 'hard' for sparse X."""
    sampler = ClusterCentroids(random_state=RND_SEED)
    sampler.fit_resample(X, Y)
    assert sampler.voting_ == expected_voting
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_fit_resample_auto():
    """'auto' strategy balances both classes down to the minority size (3+3)."""
    sampler = ClusterCentroids(sampling_strategy="auto", random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (6, 2)
    assert y_res.shape == (6,)
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_fit_resample_half():
    """A dict strategy yields exactly the requested per-class counts (3+6)."""
    sampler = ClusterCentroids(sampling_strategy={0: 3, 1: 6}, random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (9, 2)
    assert y_res.shape == (9,)
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_multiclass_fit_resample():
    """Each class of a 3-class problem is reduced to the minority-class size."""
    y = Y.copy()
    y[5], y[6] = 2, 2  # carve a third class out of two samples
    _, y_res = ClusterCentroids(random_state=RND_SEED).fit_resample(X, y)
    counts = Counter(y_res)
    # Class 2 has only two members, so every class ends up with two samples.
    assert counts[0] == 2
    assert counts[1] == 2
    assert counts[2] == 2
def test_fit_resample_object():
    """A user-supplied KMeans estimator is honoured for the clustering."""
    sampler = ClusterCentroids(
        sampling_strategy="auto",
        random_state=RND_SEED,
        estimator=KMeans(random_state=RND_SEED, n_init=1),
    )
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (6, 2)
    assert y_res.shape == (6,)
def test_fit_hard_voting():
    """With hard voting every resampled point must be an original sample."""
    kmeans = KMeans(random_state=RND_SEED, n_init=1)
    sampler = ClusterCentroids(
        sampling_strategy="auto",
        random_state=RND_SEED,
        estimator=kmeans,
        voting="hard",
    )
    X_res, y_res = sampler.fit_resample(X, Y)
    assert X_res.shape == (6, 2)
    assert y_res.shape == (6,)
    # Each selected row has to match one of the rows of the original X.
    assert all(np.any(np.all(row == X, axis=1)) for row in X_res)
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_cluster_centroids_hard_target_class():
    """Hard voting must pick samples from the targeted class only.

    Non-regression test for:
    https://github.com/scikit-learn-contrib/imbalanced-learn/issues/738
    """
    X, y = make_classification(
        n_samples=1000,
        n_features=2,
        n_informative=1,
        n_redundant=0,
        n_repeated=0,
        n_clusters_per_class=1,
        weights=[0.3, 0.7],
        class_sep=0.01,
        random_state=0,
    )
    sampler = ClusterCentroids(voting="hard", random_state=0)
    X_res, y_res = sampler.fit_resample(X, y)
    X_minority = X[np.flatnonzero(y == 0)]
    X_res_majority = X_res[np.flatnonzero(y_res == 1)]
    # No row selected for the majority class may come from the minority class.
    for selected in X_res_majority:
        assert not any(np.allclose(selected, minority) for minority in X_minority)
def test_cluster_centroids_custom_clusterer():
    """Validate the `estimator` parameter against several clusterer flavours."""
    # A clusterer exposing `cluster_centers_` works out of the box.
    sampler = ClusterCentroids(estimator=_CustomClusterer(), random_state=RND_SEED)
    sampler.fit_resample(X, Y)
    assert isinstance(sampler.estimator_.cluster_centers_, np.ndarray)
    # A clusterer without the fitted attribute raises at resampling time.
    sampler = ClusterCentroids(
        estimator=_CustomClusterer(expose_cluster_centers=False),
        random_state=RND_SEED,
    )
    err_msg = (
        "`estimator` should be a clustering estimator exposing a fitted parameter "
        "`cluster_centers_`."
    )
    with pytest.raises(RuntimeError, match=err_msg):
        sampler.fit_resample(X, Y)
    # A non-clusterer estimator is rejected upfront.
    sampler = ClusterCentroids(estimator=LogisticRegression(), random_state=RND_SEED)
    err_msg = (
        "`estimator` should be a clustering estimator exposing a parameter "
        "`n_clusters` and a fitted parameter `cluster_centers_`."
    )
    with pytest.raises(ValueError, match=err_msg):
        sampler.fit_resample(X, Y)
| 5,289 | 31.256098 | 85 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/under_sampling/_prototype_generation/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/combine/_smote_enn.py | """Class to perform over-sampling using SMOTE and cleaning using ENN."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
from sklearn.base import clone
from sklearn.utils import check_X_y
from ..base import BaseSampler
from ..over_sampling import SMOTE
from ..over_sampling.base import BaseOverSampler
from ..under_sampling import EditedNearestNeighbours
from ..utils import Substitution, check_target_type
from ..utils._docstring import _n_jobs_docstring, _random_state_docstring
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SMOTEENN(BaseSampler):
    """Over-sampling using SMOTE and cleaning using ENN.
    Combine over- and under-sampling using SMOTE and Edited Nearest Neighbours.
    Read more in the :ref:`User Guide <combine>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    smote : sampler object, default=None
        The :class:`~imblearn.over_sampling.SMOTE` object to use. If not given,
        a :class:`~imblearn.over_sampling.SMOTE` object with default parameters
        will be given.
    enn : sampler object, default=None
        The :class:`~imblearn.under_sampling.EditedNearestNeighbours` object
        to use. If not given, a
        :class:`~imblearn.under_sampling.EditedNearestNeighbours` object with
        sampling strategy='all' will be given.
    {n_jobs}
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    smote_ : sampler object
        The validated :class:`~imblearn.over_sampling.SMOTE` instance.
    enn_ : sampler object
        The validated :class:`~imblearn.under_sampling.EditedNearestNeighbours`
        instance.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTETomek : Over-sample using SMOTE followed by under-sampling removing
        the Tomek's links.
    Notes
    -----
    The method is presented in [1]_.
    Supports multi-class resampling. Refer to SMOTE and ENN regarding the
    scheme which used.
    References
    ----------
    .. [1] G. Batista, R. C. Prati, M. C. Monard. "A study of the behavior of
       several methods for balancing machine learning training data," ACM
       Sigkdd Explorations Newsletter 6 (1), 20-29, 2004.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.combine import SMOTEENN
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sme = SMOTEENN(random_state=42)
    >>> X_res, y_res = sme.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 881}})
    """
    # Net effect on the minority class is over-sampling (SMOTE first); the ENN
    # step afterwards only removes a few noisy samples.
    _sampling_type = "over-sampling"
    _parameter_constraints: dict = {
        **BaseOverSampler._parameter_constraints,
        "smote": [SMOTE, None],
        "enn": [EditedNearestNeighbours, None],
        "n_jobs": [numbers.Integral, None],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        smote=None,
        enn=None,
        n_jobs=None,
    ):
        super().__init__()
        self.sampling_strategy = sampling_strategy
        self.random_state = random_state
        self.smote = smote
        self.enn = enn
        self.n_jobs = n_jobs
    def _validate_estimator(self):
        "Private function to validate SMOTE and ENN objects"
        # Clone user-provided samplers so that fitting never mutates the
        # objects passed at construction time; otherwise build defaults.
        if self.smote is not None:
            self.smote_ = clone(self.smote)
        else:
            self.smote_ = SMOTE(
                sampling_strategy=self.sampling_strategy,
                random_state=self.random_state,
                n_jobs=self.n_jobs,
            )
        if self.enn is not None:
            self.enn_ = clone(self.enn)
        else:
            # 'all' lets ENN clean every class, not only the majority one.
            self.enn_ = EditedNearestNeighbours(
                sampling_strategy="all", n_jobs=self.n_jobs
            )
    def _fit_resample(self, X, y):
        # Over-sample with SMOTE first, then edit the augmented dataset with
        # ENN to remove ambiguous samples.
        self._validate_estimator()
        y = check_target_type(y)
        X, y = check_X_y(X, y, accept_sparse=["csr", "csc"])
        self.sampling_strategy_ = self.sampling_strategy
        X_res, y_res = self.smote_.fit_resample(X, y)
        return self.enn_.fit_resample(X_res, y_res)
| 5,090 | 30.425926 | 79 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/combine/_smote_tomek.py | """Class to perform over-sampling using SMOTE and cleaning using Tomek
links."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
from sklearn.base import clone
from sklearn.utils import check_X_y
from ..base import BaseSampler
from ..over_sampling import SMOTE
from ..over_sampling.base import BaseOverSampler
from ..under_sampling import TomekLinks
from ..utils import Substitution, check_target_type
from ..utils._docstring import _n_jobs_docstring, _random_state_docstring
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SMOTETomek(BaseSampler):
    """Over-sampling using SMOTE and cleaning using Tomek links.
    Combine over- and under-sampling using SMOTE and Tomek links.
    Read more in the :ref:`User Guide <combine>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    smote : sampler object, default=None
        The :class:`~imblearn.over_sampling.SMOTE` object to use. If not given,
        a :class:`~imblearn.over_sampling.SMOTE` object with default parameters
        will be given.
    tomek : sampler object, default=None
        The :class:`~imblearn.under_sampling.TomekLinks` object to use. If not
        given, a :class:`~imblearn.under_sampling.TomekLinks` object with
        sampling strategy='all' will be given.
    {n_jobs}
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    smote_ : sampler object
        The validated :class:`~imblearn.over_sampling.SMOTE` instance.
    tomek_ : sampler object
        The validated :class:`~imblearn.under_sampling.TomekLinks` instance.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTEENN : Over-sample using SMOTE followed by under-sampling using Edited
        Nearest Neighbours.
    Notes
    -----
    The method is presented in [1]_.
    Supports multi-class resampling. Refer to SMOTE and TomekLinks regarding
    the scheme which used.
    References
    ----------
    .. [1] G. Batista, B. Bazzan, M. Monard, "Balancing Training Data for
       Automated Annotation of Keywords: a Case Study," In WOB, 10-18, 2003.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.combine import SMOTETomek
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> smt = SMOTETomek(random_state=42)
    >>> X_res, y_res = smt.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    # The pipeline globally over-samples: SMOTE first inflates the minority
    # class, the Tomek-links step only removes a few ambiguous pairs.
    _sampling_type = "over-sampling"
    _parameter_constraints: dict = {
        **BaseOverSampler._parameter_constraints,
        "smote": [SMOTE, None],
        "tomek": [TomekLinks, None],
        "n_jobs": [numbers.Integral, None],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        smote=None,
        tomek=None,
        n_jobs=None,
    ):
        super().__init__()
        self.sampling_strategy = sampling_strategy
        self.random_state = random_state
        self.smote = smote
        self.tomek = tomek
        self.n_jobs = n_jobs
    def _validate_estimator(self):
        "Private function to validate SMOTE and TomekLinks objects"
        # Clone user-provided samplers so that fitting never mutates the
        # objects passed at construction time; otherwise build defaults.
        if self.smote is not None:
            self.smote_ = clone(self.smote)
        else:
            self.smote_ = SMOTE(
                sampling_strategy=self.sampling_strategy,
                random_state=self.random_state,
                n_jobs=self.n_jobs,
            )
        if self.tomek is not None:
            self.tomek_ = clone(self.tomek)
        else:
            # 'all' removes both samples of a Tomek link, whatever their class.
            self.tomek_ = TomekLinks(sampling_strategy="all", n_jobs=self.n_jobs)
    def _fit_resample(self, X, y):
        # Over-sample with SMOTE first, then clean the Tomek links from the
        # augmented dataset.
        self._validate_estimator()
        y = check_target_type(y)
        X, y = check_X_y(X, y, accept_sparse=["csr", "csc"])
        self.sampling_strategy_ = self.sampling_strategy
        X_res, y_res = self.smote_.fit_resample(X, y)
        return self.tomek_.fit_resample(X_res, y_res)
| 4,948 | 30.125786 | 81 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/combine/__init__.py | """The :mod:`imblearn.combine` provides methods which combine
over-sampling and under-sampling.
"""
from ._smote_enn import SMOTEENN
from ._smote_tomek import SMOTETomek
__all__ = ["SMOTEENN", "SMOTETomek"]
| 209 | 22.333333 | 61 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/combine/tests/test_smote_enn.py | """Test the module SMOTE ENN."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.combine import SMOTEENN
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import EditedNearestNeighbours
# Seed shared by all samplers in this module so results are reproducible.
RND_SEED = 0
# Small 2D fixture: 20 samples, two imbalanced classes (8 zeros, 12 ones).
X = np.array(
    [
        [0.11622591, -0.0317206],
        [0.77481731, 0.60935141],
        [1.25192108, -0.22367336],
        [0.53366841, -0.30312976],
        [1.52091956, -0.49283504],
        [-0.28162401, -2.10400981],
        [0.83680821, 1.72827342],
        [0.3084254, 0.33299982],
        [0.70472253, -0.73309052],
        [0.28893132, -0.38761769],
        [1.15514042, 0.0129463],
        [0.88407872, 0.35454207],
        [1.31301027, -0.92648734],
        [-1.11515198, -0.93689695],
        [-0.18410027, -0.45194484],
        [0.9281014, 0.53085498],
        [-0.14374509, 0.27370049],
        [-0.41635887, -0.38299653],
        [0.08711622, 0.93259929],
        [1.70580611, -0.11219234],
    ]
)
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
# Relative tolerance for floating-point comparisons.
R_TOL = 1e-4
def test_sample_regular():
    """SMOTE followed by ENN on the fixture yields the expected 7 samples."""
    sampler = SMOTEENN(random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [1.52091956, -0.49283504],
            [0.84976473, -0.15570176],
            [0.61319159, -0.11571667],
            [0.66052536, -0.28246518],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.08711622, 0.93259929],
        ]
    )
    y_expected = np.array([0, 0, 0, 0, 1, 1, 1])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_sample_regular_pass_smote_enn():
    """Passing explicit SMOTE/ENN objects matches the default pipeline."""
    sampler = SMOTEENN(
        smote=SMOTE(sampling_strategy="auto", random_state=RND_SEED),
        enn=EditedNearestNeighbours(sampling_strategy="all"),
        random_state=RND_SEED,
    )
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [1.52091956, -0.49283504],
            [0.84976473, -0.15570176],
            [0.61319159, -0.11571667],
            [0.66052536, -0.28246518],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.08711622, 0.93259929],
        ]
    )
    y_expected = np.array([0, 0, 0, 0, 1, 1, 1])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_sample_regular_half():
    """A dict sampling strategy restricts how many samples are generated."""
    sampler = SMOTEENN(sampling_strategy={0: 10, 1: 12}, random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.08711622, 0.93259929],
        ]
    )
    y_expected = np.array([0, 1, 1, 1])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected)
def test_validate_estimator_init():
    """User-supplied SMOTE and ENN instances are honoured."""
    smote = SMOTE(random_state=RND_SEED)
    enn = EditedNearestNeighbours(sampling_strategy="all")
    sampler = SMOTEENN(smote=smote, enn=enn, random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [1.52091956, -0.49283504],
            [0.84976473, -0.15570176],
            [0.61319159, -0.11571667],
            [0.66052536, -0.28246518],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.08711622, 0.93259929],
        ]
    )
    y_expected = np.array([0, 0, 0, 0, 1, 1, 1])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_validate_estimator_default():
    """The default inner samplers reproduce the reference resampling."""
    sampler = SMOTEENN(random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [1.52091956, -0.49283504],
            [0.84976473, -0.15570176],
            [0.61319159, -0.11571667],
            [0.66052536, -0.28246518],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.08711622, 0.93259929],
        ]
    )
    y_expected = np.array([0, 0, 0, 0, 1, 1, 1])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_parallelisation():
    """`n_jobs` must propagate from SMOTEENN to both inner samplers."""
    # Default: no parallelism requested anywhere.
    resampler = SMOTEENN(random_state=RND_SEED)
    resampler._validate_estimator()
    assert resampler.n_jobs is None
    assert resampler.smote_.n_jobs is None
    assert resampler.enn_.n_jobs is None
    # An explicit job count is forwarded to the SMOTE and ENN steps.
    resampler = SMOTEENN(random_state=RND_SEED, n_jobs=8)
    resampler._validate_estimator()
    assert resampler.n_jobs == 8
    assert resampler.smote_.n_jobs == 8
    assert resampler.enn_.n_jobs == 8
| 4,800 | 29.386076 | 80 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/combine/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/combine/tests/test_smote_tomek.py | """Test the module SMOTE Tomek."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.combine import SMOTETomek
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import TomekLinks
# Seed shared by all samplers in this module so results are reproducible.
RND_SEED = 0
# Small 2D fixture: 20 samples, two imbalanced classes (8 zeros, 12 ones).
X = np.array(
    [
        [0.20622591, 0.0582794],
        [0.68481731, 0.51935141],
        [1.34192108, -0.13367336],
        [0.62366841, -0.21312976],
        [1.61091956, -0.40283504],
        [-0.37162401, -2.19400981],
        [0.74680821, 1.63827342],
        [0.2184254, 0.24299982],
        [0.61472253, -0.82309052],
        [0.19893132, -0.47761769],
        [1.06514042, -0.0770537],
        [0.97407872, 0.44454207],
        [1.40301027, -0.83648734],
        [-1.20515198, -1.02689695],
        [-0.27410027, -0.54194484],
        [0.8381014, 0.44085498],
        [-0.23374509, 0.18370049],
        [-0.32635887, -0.29299653],
        [-0.00288378, 0.84259929],
        [1.79580611, -0.02219234],
    ]
)
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
# Relative tolerance for floating-point comparisons.
R_TOL = 1e-4
def test_sample_regular():
    """SMOTE + Tomek links on the fixture yields the expected 16 samples."""
    sampler = SMOTETomek(random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [0.68481731, 0.51935141],
            [1.34192108, -0.13367336],
            [0.62366841, -0.21312976],
            [1.61091956, -0.40283504],
            [-0.37162401, -2.19400981],
            [0.74680821, 1.63827342],
            [0.61472253, -0.82309052],
            [0.19893132, -0.47761769],
            [1.40301027, -0.83648734],
            [-1.20515198, -1.02689695],
            [-0.23374509, 0.18370049],
            [-0.00288378, 0.84259929],
            [1.79580611, -0.02219234],
            [0.38307743, -0.05670439],
            [0.70319159, -0.02571667],
            [0.75052536, -0.19246518],
        ]
    )
    y_expected = np.array([1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_sample_regular_half():
    """A dict sampling strategy restricts how many samples are generated."""
    sampler = SMOTETomek(sampling_strategy={0: 9, 1: 12}, random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [0.68481731, 0.51935141],
            [0.62366841, -0.21312976],
            [1.61091956, -0.40283504],
            [-0.37162401, -2.19400981],
            [0.74680821, 1.63827342],
            [0.61472253, -0.82309052],
            [0.19893132, -0.47761769],
            [1.40301027, -0.83648734],
            [-1.20515198, -1.02689695],
            [-0.23374509, 0.18370049],
            [-0.00288378, 0.84259929],
            [1.79580611, -0.02219234],
            [0.45784496, -0.1053161],
        ]
    )
    y_expected = np.array([1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_validate_estimator_init():
    """User-supplied SMOTE and TomekLinks instances are honoured."""
    smote = SMOTE(random_state=RND_SEED)
    tomek = TomekLinks(sampling_strategy="all")
    sampler = SMOTETomek(smote=smote, tomek=tomek, random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [0.68481731, 0.51935141],
            [1.34192108, -0.13367336],
            [0.62366841, -0.21312976],
            [1.61091956, -0.40283504],
            [-0.37162401, -2.19400981],
            [0.74680821, 1.63827342],
            [0.61472253, -0.82309052],
            [0.19893132, -0.47761769],
            [1.40301027, -0.83648734],
            [-1.20515198, -1.02689695],
            [-0.23374509, 0.18370049],
            [-0.00288378, 0.84259929],
            [1.79580611, -0.02219234],
            [0.38307743, -0.05670439],
            [0.70319159, -0.02571667],
            [0.75052536, -0.19246518],
        ]
    )
    y_expected = np.array([1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_validate_estimator_default():
    """The default inner samplers reproduce the reference resampling."""
    sampler = SMOTETomek(random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X, Y)
    X_expected = np.array(
        [
            [0.68481731, 0.51935141],
            [1.34192108, -0.13367336],
            [0.62366841, -0.21312976],
            [1.61091956, -0.40283504],
            [-0.37162401, -2.19400981],
            [0.74680821, 1.63827342],
            [0.61472253, -0.82309052],
            [0.19893132, -0.47761769],
            [1.40301027, -0.83648734],
            [-1.20515198, -1.02689695],
            [-0.23374509, 0.18370049],
            [-0.00288378, 0.84259929],
            [1.79580611, -0.02219234],
            [0.38307743, -0.05670439],
            [0.70319159, -0.02571667],
            [0.75052536, -0.19246518],
        ]
    )
    y_expected = np.array([1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0])
    assert_array_equal(y_res, y_expected)
    assert_allclose(X_res, X_expected, rtol=R_TOL)
def test_parallelisation():
    """`n_jobs` must propagate from SMOTETomek to both inner samplers."""
    # Default: no parallelism requested anywhere.
    resampler = SMOTETomek(random_state=RND_SEED)
    resampler._validate_estimator()
    assert resampler.n_jobs is None
    assert resampler.smote_.n_jobs is None
    assert resampler.tomek_.n_jobs is None
    # An explicit job count is forwarded to the SMOTE and Tomek steps.
    resampler = SMOTETomek(random_state=RND_SEED, n_jobs=8)
    resampler._validate_estimator()
    assert resampler.n_jobs == 8
    assert resampler.smote_.n_jobs == 8
    assert resampler.tomek_.n_jobs == 8
| 5,576 | 32.196429 | 82 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/pairwise.py | """Metrics to perform pairwise computation."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numbers
import numpy as np
from scipy.spatial import distance_matrix
from sklearn.base import BaseEstimator
from sklearn.utils import check_consistent_length
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_is_fitted
from ..base import _ParamsValidationMixin
from ..utils._param_validation import StrOptions
class ValueDifferenceMetric(_ParamsValidationMixin, BaseEstimator):
    r"""Class implementing the Value Difference Metric.
    This metric computes the distance between samples containing only
    categorical features. The distance between feature values of two samples is
    defined as:
    .. math::
       \delta(x, y) = \sum_{c=1}^{C} |p(c|x_{f}) - p(c|y_{f})|^{k} \ ,
    where :math:`x` and :math:`y` are two samples and :math:`f` a given
    feature, :math:`C` is the number of classes, :math:`p(c|x_{f})` is the
    conditional probability that the output class is :math:`c` given that
    the feature value :math:`f` has the value :math:`x` and :math:`k` an
    exponent usually defined to 1 or 2.
    The distance for the feature vectors :math:`X` and :math:`Y` is
    subsequently defined as:
    .. math::
       \Delta(X, Y) = \sum_{f=1}^{F} \delta(X_{f}, Y_{f})^{r} \ ,
    where :math:`F` is the number of feature and :math:`r` an exponent usually
    defined equal to 1 or 2.
    The definition of this distance was proposed in [1]_.
    Read more in the :ref:`User Guide <vdm>`.
    .. versionadded:: 0.8
    Parameters
    ----------
    n_categories : "auto" or array-like of shape (n_features,), default="auto"
        The number of unique categories per features. If `"auto"`, the number
        of categories will be computed from `X` at `fit`. Otherwise, you can
        provide an array-like of such counts to avoid computation. You can use
        the fitted attribute `categories_` of the
        :class:`~sklearn.preprocessing.OrdinalEncoder` to deduce these counts.
    k : int, default=1
        Exponent used to compute the distance between feature value.
    r : int, default=2
        Exponent used to compute the distance between the feature vector.
    Attributes
    ----------
    n_categories_ : ndarray of shape (n_features,)
        The number of categories per features.
    proba_per_class_ : list of ndarray of shape (n_categories, n_classes)
        List of length `n_features` containing the conditional probabilities
        for each category given a class.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.10
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    sklearn.neighbors.DistanceMetric : Interface for fast metric computation.
    Notes
    -----
    The input data `X` are expected to be encoded by an
    :class:`~sklearn.preprocessing.OrdinalEncoder` and the data type is used
    should be `np.int32`. If other data types are given, `X` will be converted
    to `np.int32`.
    References
    ----------
    .. [1] Stanfill, Craig, and David Waltz. "Toward memory-based reasoning."
       Communications of the ACM 29.12 (1986): 1213-1228.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array(["green"] * 10 + ["red"] * 10 + ["blue"] * 10).reshape(-1, 1)
    >>> y = [1] * 8 + [0] * 5 + [1] * 7 + [0] * 9 + [1]
    >>> from sklearn.preprocessing import OrdinalEncoder
    >>> encoder = OrdinalEncoder(dtype=np.int32)
    >>> X_encoded = encoder.fit_transform(X)
    >>> from imblearn.metrics.pairwise import ValueDifferenceMetric
    >>> vdm = ValueDifferenceMetric().fit(X_encoded, y)
    >>> pairwise_distance = vdm.pairwise(X_encoded)
    >>> pairwise_distance.shape
    (30, 30)
    >>> X_test = np.array(["green", "red", "blue"]).reshape(-1, 1)
    >>> X_test_encoded = encoder.transform(X_test)
    >>> vdm.pairwise(X_test_encoded)
    array([[0.  , 0.04, 1.96],
           [0.04, 0.  , 1.44],
           [1.96, 1.44, 0.  ]])
    """
    _parameter_constraints: dict = {
        "n_categories": [StrOptions({"auto"}), "array-like"],
        "k": [numbers.Integral],
        "r": [numbers.Integral],
    }
    def __init__(self, *, n_categories="auto", k=1, r=2):
        self.n_categories = n_categories
        self.k = k
        self.r = r
    def fit(self, X, y):
        """Compute the necessary statistics from the training set.
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features), dtype=np.int32
            The input data. The data are expected to be encoded with a
            :class:`~sklearn.preprocessing.OrdinalEncoder`.
        y : ndarray of shape (n_samples,)
            The target.
        Returns
        -------
        self : object
            Return the instance itself.
        """
        self._validate_params()
        check_consistent_length(X, y)
        X, y = self._validate_data(X, y, reset=True, dtype=np.int32)
        if isinstance(self.n_categories, str) and self.n_categories == "auto":
            # categories are expected to be encoded from 0 to n_categories - 1
            self.n_categories_ = X.max(axis=0) + 1
        else:
            if len(self.n_categories) != self.n_features_in_:
                raise ValueError(
                    f"The length of n_categories is not consistent with the "
                    f"number of feature in X. Got {len(self.n_categories)} "
                    f"elements in n_categories and {self.n_features_in_} in "
                    f"X."
                )
            # NOTE(review): under NumPy 2.0 `np.array(..., copy=False)` raises
            # when a copy is unavoidable; `np.asarray` may be safer here.
            self.n_categories_ = np.array(self.n_categories, copy=False)
        classes = unique_labels(y)
        # list of length n_features of ndarray (n_categories, n_classes)
        # compute the counts
        self.proba_per_class_ = [
            np.empty(shape=(n_cat, len(classes)), dtype=np.float64)
            for n_cat in self.n_categories_
        ]
        for feature_idx in range(self.n_features_in_):
            for klass_idx, klass in enumerate(classes):
                # Count occurrences of each category of this feature within
                # the samples of class `klass`.
                self.proba_per_class_[feature_idx][:, klass_idx] = np.bincount(
                    X[y == klass, feature_idx],
                    minlength=self.n_categories_[feature_idx],
                )
        # normalize by the summing over the classes
        with np.errstate(invalid="ignore"):
            # silence potential warning due to in-place division by zero
            for feature_idx in range(self.n_features_in_):
                self.proba_per_class_[feature_idx] /= (
                    self.proba_per_class_[feature_idx].sum(axis=1).reshape(-1, 1)
                )
                # Categories never seen in training produce 0/0 -> NaN; map
                # them back to 0 in place.
                np.nan_to_num(self.proba_per_class_[feature_idx], copy=False)
        return self
    def pairwise(self, X, Y=None):
        """Compute the VDM distance pairwise.
        Parameters
        ----------
        X : ndarray of shape (n_samples, n_features), dtype=np.int32
            The input data. The data are expected to be encoded with a
            :class:`~sklearn.preprocessing.OrdinalEncoder`.
        Y : ndarray of shape (n_samples, n_features), dtype=np.int32, \
                default=None
            The input data. The data are expected to be encoded with a
            :class:`~sklearn.preprocessing.OrdinalEncoder`. If `None`, the
            distances of `X` against itself are computed.
        Returns
        -------
        distance_matrix : ndarray of shape (n_samples_X, n_samples_Y)
            The VDM pairwise distance.
        """
        check_is_fitted(self)
        X = self._validate_data(X, reset=False, dtype=np.int32)
        n_samples_X = X.shape[0]
        if Y is not None:
            Y = self._validate_data(Y, reset=False, dtype=np.int32)
            n_samples_Y = Y.shape[0]
        else:
            n_samples_Y = n_samples_X
        distance = np.zeros(shape=(n_samples_X, n_samples_Y), dtype=np.float64)
        for feature_idx in range(self.n_features_in_):
            # Look up the per-class probability vectors of the categories
            # taken by this feature -> shape (n_samples, n_classes).
            proba_feature_X = self.proba_per_class_[feature_idx][X[:, feature_idx]]
            if Y is not None:
                proba_feature_Y = self.proba_per_class_[feature_idx][Y[:, feature_idx]]
            else:
                proba_feature_Y = proba_feature_X
            # Accumulate delta(X_f, Y_f) ** r across features.
            distance += (
                distance_matrix(proba_feature_X, proba_feature_Y, p=self.k) ** self.r
            )
        return distance
    def _more_tags(self):
        return {
            "requires_positive_X": True,  # X should be encoded with OrdinalEncoder
        }
| 8,670 | 35.897872 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/_classification.py | # coding: utf-8
"""Metrics to assess performance on a classification task given class
predictions. The available metrics are complementary from the metrics available
in scikit-learn.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Dariusz Brzezinski
# License: MIT
import functools
import numbers
import warnings
from inspect import signature
import numpy as np
import scipy as sp
from sklearn.metrics import mean_absolute_error, precision_recall_fscore_support
from sklearn.metrics._classification import _check_targets, _prf_divide
from sklearn.preprocessing import LabelEncoder
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import check_consistent_length, column_or_1d
from ..utils._param_validation import Interval, StrOptions, validate_params
@validate_params(
{
"y_true": ["array-like"],
"y_pred": ["array-like"],
"labels": ["array-like", None],
"pos_label": [str, numbers.Integral, None],
"average": [
None,
StrOptions({"binary", "micro", "macro", "weighted", "samples"}),
],
"warn_for": ["array-like"],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def sensitivity_specificity_support(
y_true,
y_pred,
*,
labels=None,
pos_label=1,
average=None,
warn_for=("sensitivity", "specificity"),
sample_weight=None,
):
"""Compute sensitivity, specificity, and support for each class.
The sensitivity is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
of true positives and ``fn`` the number of false negatives. The sensitivity
quantifies the ability to avoid false negatives_[1].
The specificity is the ratio ``tn / (tn + fp)`` where ``tn`` is the number
of true negatives and ``fn`` the number of false negatives. The specificity
quantifies the ability to avoid false positives_[1].
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average sensitivity and specificity if ``average``
is one of ``'weighted'``.
Read more in the :ref:`User Guide <sensitivity_specificity>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth (correct) target values.
y_pred : array-like of shape (n_samples,)
Estimated targets as returned by a classifier.
labels : array-like, default=None
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str, int or None, default=1
The class to report if ``average='binary'`` and the data is binary.
If ``pos_label is None`` and in binary classification, this function
returns the average sensitivity and specificity if ``average``
is one of ``'weighted'``.
If the data are multiclass, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : str, default=None
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set of {{"sensitivity", "specificity"}}, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
sensitivity : float (if `average is None`) or ndarray of \
shape (n_unique_labels,)
The sensitivity metric.
specificity : float (if `average is None`) or ndarray of \
shape (n_unique_labels,)
The specificity metric.
support : int (if `average is None`) or ndarray of \
shape (n_unique_labels,)
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Sensitivity and specificity
<https://en.wikipedia.org/wiki/Sensitivity_and_specificity>`_
Examples
--------
>>> import numpy as np
>>> from imblearn.metrics import sensitivity_specificity_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> sensitivity_specificity_support(y_true, y_pred, average='macro')
(0.33..., 0.66..., None)
>>> sensitivity_specificity_support(y_true, y_pred, average='micro')
(0.33..., 0.66..., None)
>>> sensitivity_specificity_support(y_true, y_pred, average='weighted')
(0.33..., 0.66..., None)
"""
average_options = (None, "micro", "macro", "weighted", "samples")
if average not in average_options and average != "binary":
raise ValueError("average has to be one of " + str(average_options))
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == "binary":
if y_type == "binary":
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0.0, 0.0, 0)
else:
raise ValueError(
"pos_label=%r is not a valid label: %r"
% (pos_label, present_labels)
)
labels = [pos_label]
else:
raise ValueError(
"Target is %s but average='binary'. Please "
"choose another average setting." % y_type
)
elif pos_label not in (None, 1):
warnings.warn(
"Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average),
UserWarning,
)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack(
[labels, np.setdiff1d(present_labels, labels, assume_unique=True)]
)
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith("multilabel"):
raise ValueError("imblearn does not support multilabel")
elif average == "samples":
raise ValueError(
"Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead."
)
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = np.bincount(
tp_bins, weights=tp_bins_weights, minlength=len(labels)
)
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = np.bincount(y_pred, weights=sample_weight, minlength=len(labels))
if len(y_true):
true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))
# Compute the true negative
tn_sum = y_true.size - (pred_sum + true_sum - tp_sum)
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
tn_sum = tn_sum[indices]
if average == "micro":
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
tn_sum = np.array([tn_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
with np.errstate(divide="ignore", invalid="ignore"):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
specificity = _prf_divide(
tn_sum,
tn_sum + pred_sum - tp_sum,
"specificity",
"predicted",
average,
warn_for,
)
sensitivity = _prf_divide(
tp_sum, true_sum, "sensitivity", "true", average, warn_for
)
# Average the results
if average == "weighted":
weights = true_sum
if weights.sum() == 0:
return 0, 0, None
elif average == "samples":
weights = sample_weight
else:
weights = None
if average is not None:
assert average != "binary" or len(specificity) == 1
specificity = np.average(specificity, weights=weights)
sensitivity = np.average(sensitivity, weights=weights)
true_sum = None # return no support
return sensitivity, specificity, true_sum
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions({"binary", "micro", "macro", "weighted", "samples"}),
        ],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def sensitivity_score(
    y_true,
    y_pred,
    *,
    labels=None,
    pos_label=1,
    average="binary",
    sample_weight=None,
):
    """Compute the sensitivity.

    The sensitivity is the ratio ``tp / (tp + fn)`` where ``tp`` is the number
    of true positives and ``fn`` the number of false negatives. The sensitivity
    quantifies the ability to avoid false negatives.

    The best value is 1 and the worst value is 0.

    Read more in the :ref:`User Guide <sensitivity_specificity>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average sensitivity if ``average`` is one of ``'weighted'``.
        If the data are multiclass, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : str, default=None
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    sensitivity : float (if `average is None`) or ndarray of \
            shape (n_unique_labels,)
        The sensitivity metric.

    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import sensitivity_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> sensitivity_score(y_true, y_pred, average='macro')
    0.33...
    >>> sensitivity_score(y_true, y_pred, average='micro')
    0.33...
    >>> sensitivity_score(y_true, y_pred, average='weighted')
    0.33...
    >>> sensitivity_score(y_true, y_pred, average=None)
    array([1., 0., 0.])
    """
    # Delegate to the shared helper and keep only the sensitivity component;
    # warn_for restricts ill-defined warnings to the metric actually returned.
    s, _, _ = sensitivity_specificity_support(
        y_true,
        y_pred,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=("sensitivity",),
        sample_weight=sample_weight,
    )
    return s
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions({"binary", "micro", "macro", "weighted", "samples"}),
        ],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def specificity_score(
    y_true,
    y_pred,
    *,
    labels=None,
    pos_label=1,
    average="binary",
    sample_weight=None,
):
    """Compute the specificity.

    The specificity is the ratio ``tn / (tn + fp)`` where ``tn`` is the number
    of true negatives and ``fp`` the number of false positives. It quantifies
    the ability of the classifier to avoid false positives. The best value is
    1 and the worst value is 0.

    Read more in the :ref:`User Guide <sensitivity_specificity>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for instance to compute a multiclass average that ignores a
        majority negative class; labels absent from the data contribute 0 to a
        macro average.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average specificity if ``average`` is one of
        ``'weighted'``. Ignored for multiclass data; use
        ``labels=[pos_label]`` together with ``average != 'binary'`` to report
        the score of a single label.

    average : str, default=None
        Averaging strategy applied to the per-class scores:

        ``None``:
            Return the score of each class.
        ``'binary'``:
            Only report the result for the class specified by ``pos_label``
            (binary targets only).
        ``'micro'``:
            Count total true positives, false negatives and false positives
            globally.
        ``'macro'``:
            Unweighted mean of the per-label metrics; label imbalance is not
            taken into account.
        ``'weighted'``:
            Mean of the per-label metrics weighted by support (number of true
            instances per label).
        ``'samples'``:
            Average per instance (only meaningful for multilabel targets,
            which are not supported here).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    specificity : float (if `average is None`) or ndarray of \
            shape (n_unique_labels,)
        The specificity metric.

    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import specificity_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> specificity_score(y_true, y_pred, average='macro')
    0.66...
    >>> specificity_score(y_true, y_pred, average='micro')
    0.66...
    >>> specificity_score(y_true, y_pred, average='weighted')
    0.66...
    >>> specificity_score(y_true, y_pred, average=None)
    array([0.75, 0.5 , 0.75])
    """
    # The shared helper computes (sensitivity, specificity, support); only the
    # specificity component is of interest here.
    _, specificity, _ = sensitivity_specificity_support(
        y_true,
        y_pred,
        labels=labels,
        pos_label=pos_label,
        average=average,
        warn_for=("specificity",),
        sample_weight=sample_weight,
    )
    return specificity
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "pos_label": [str, numbers.Integral, None],
        "average": [
            None,
            StrOptions(
                {"binary", "micro", "macro", "weighted", "samples", "multiclass"}
            ),
        ],
        "sample_weight": ["array-like", None],
        "correction": [Interval(numbers.Real, 0, None, closed="left")],
    },
    prefer_skip_nested_validation=True,
)
def geometric_mean_score(
    y_true,
    y_pred,
    *,
    labels=None,
    pos_label=1,
    average="multiclass",
    sample_weight=None,
    correction=0.0,
):
    """Compute the geometric mean.

    The geometric mean (G-mean) is the root of the product of class-wise
    sensitivity. This measure tries to maximize the accuracy on each of the
    classes while keeping these accuracies balanced. For binary classification
    G-mean is the squared root of the product of the sensitivity
    and specificity. For multi-class problems it is a higher root of the
    product of sensitivity for each class.

    For compatibility with other imbalance performance measures, G-mean can be
    calculated for each class separately on a one-vs-rest basis when
    ``average != 'multiclass'``.

    The best value is 1 and the worst value is 0. Traditionally if at least one
    class is unrecognized by the classifier, G-mean resolves to zero. To
    alleviate this property, for highly multi-class the sensitivity of
    unrecognized classes can be "corrected" to be a user specified value
    (instead of zero). This option works only if ``average == 'multiclass'``.

    Read more in the :ref:`User Guide <imbalanced_metrics>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated targets as returned by a classifier.

    labels : array-like, default=None
        The set of labels to include when ``average != 'binary'``, and their
        order if ``average is None``. Labels present in the data can be
        excluded, for example to calculate a multiclass average ignoring a
        majority negative class, while labels not present in the data will
        result in 0 components in a macro average.

    pos_label : str, int or None, default=1
        The class to report if ``average='binary'`` and the data is binary.
        If ``pos_label is None`` and in binary classification, this function
        returns the average geometric mean if ``average`` is one of
        ``'weighted'``.
        If the data are multiclass, this will be ignored;
        setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
        scores for that label only.

    average : str or None, default='multiclass'
        If ``None``, the scores for each class are returned. Otherwise, this
        determines the type of averaging performed on the data:

        ``'binary'``:
            Only report results for the class specified by ``pos_label``.
            This is applicable only if targets (``y_{true,pred}``) are binary.
        ``'micro'``:
            Calculate metrics globally by counting the total true positives,
            false negatives and false positives.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'multiclass'``:
            No average is taken.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label). This
            alters 'macro' to account for label imbalance; it can result in an
            F-score that is not between precision and recall.
        ``'samples'``:
            Calculate metrics for each instance, and find their average (only
            meaningful for multilabel classification where this differs from
            :func:`accuracy_score`).

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    correction : float, default=0.0
        Substitutes sensitivity of unrecognized classes from zero to a given
        value.

    Returns
    -------
    geometric_mean : float
        Returns the geometric mean.

    Notes
    -----
    See :ref:`sphx_glr_auto_examples_evaluation_plot_metrics.py`.

    References
    ----------
    .. [1] Kubat, M. and Matwin, S. "Addressing the curse of
       imbalanced training sets: one-sided selection" ICML (1997)

    .. [2] Barandela, R., Sánchez, J. S., Garcıa, V., & Rangel, E. "Strategies
       for learning in class imbalance problems", Pattern Recognition,
       36(3), (2003), pp 849-851.

    Examples
    --------
    >>> from imblearn.metrics import geometric_mean_score
    >>> y_true = [0, 1, 2, 0, 1, 2]
    >>> y_pred = [0, 2, 1, 0, 0, 1]
    >>> geometric_mean_score(y_true, y_pred)
    0.0
    >>> geometric_mean_score(y_true, y_pred, correction=0.001)
    0.010...
    >>> geometric_mean_score(y_true, y_pred, average='macro')
    0.471...
    >>> geometric_mean_score(y_true, y_pred, average='micro')
    0.471...
    >>> geometric_mean_score(y_true, y_pred, average='weighted')
    0.471...
    >>> geometric_mean_score(y_true, y_pred, average=None)
    array([0.866..., 0. , 0. ])
    """
    # ``average=None`` also takes the one-vs-rest path (None != "multiclass"),
    # so the extra ``average is None`` test was redundant and has been dropped.
    if average != "multiclass":
        sen, spe, _ = sensitivity_specificity_support(
            y_true,
            y_pred,
            labels=labels,
            pos_label=pos_label,
            average=average,
            # Warn for both metrics entering the product. The previous tuple
            # repeated "specificity" twice, silently suppressing ill-defined
            # sensitivity warnings.
            warn_for=("sensitivity", "specificity"),
            sample_weight=sample_weight,
        )
        return np.sqrt(sen * spe)
    else:
        present_labels = unique_labels(y_true, y_pred)
        if labels is None:
            labels = present_labels
            n_labels = None
        else:
            n_labels = len(labels)
            # Requested labels first, then any remaining observed labels.
            labels = np.hstack(
                [labels, np.setdiff1d(present_labels, labels, assume_unique=True)]
            )

        le = LabelEncoder()
        le.fit(labels)
        y_true = le.transform(y_true)
        y_pred = le.transform(y_pred)
        sorted_labels = le.classes_

        # labels are now from 0 to len(labels) - 1 -> use bincount
        tp = y_true == y_pred
        tp_bins = y_true[tp]

        if sample_weight is not None:
            tp_bins_weights = np.asarray(sample_weight)[tp]
        else:
            tp_bins_weights = None

        if len(tp_bins):
            tp_sum = np.bincount(
                tp_bins, weights=tp_bins_weights, minlength=len(labels)
            )
        else:
            # Pathological case: no correct prediction at all.
            true_sum = tp_sum = np.zeros(len(labels))
        if len(y_true):
            true_sum = np.bincount(y_true, weights=sample_weight, minlength=len(labels))

        # Retain only selected labels
        indices = np.searchsorted(sorted_labels, labels[:n_labels])
        tp_sum = tp_sum[indices]
        true_sum = true_sum[indices]

        with np.errstate(divide="ignore", invalid="ignore"):
            recall = _prf_divide(tp_sum, true_sum, "recall", "true", None, "recall")
        # Substitute the sensitivity of unrecognized classes so that a single
        # missed class does not force the G-mean to zero.
        recall[recall == 0] = correction

        with np.errstate(divide="ignore", invalid="ignore"):
            gmean = sp.stats.gmean(recall)
        # old version of scipy return MaskedConstant instead of 0.0
        if isinstance(gmean, np.ma.core.MaskedConstant):
            return 0.0
        return gmean
@validate_params(
    {"alpha": [numbers.Real], "squared": ["boolean"]},
    prefer_skip_nested_validation=True,
)
def make_index_balanced_accuracy(*, alpha=0.1, squared=True):
    """Balance any scoring function using the index balanced accuracy.

    This factory function wraps scoring function to express it as the
    index balanced accuracy (IBA). You need to use this function to
    decorate any scoring function.

    Only metrics requiring ``y_pred`` can be corrected with the index
    balanced accuracy. ``y_score`` cannot be used since the dominance
    cannot be computed.

    Read more in the :ref:`User Guide <imbalanced_metrics>`.

    Parameters
    ----------
    alpha : float, default=0.1
        Weighting factor.

    squared : bool, default=True
        If ``squared`` is True, then the metric computed will be squared
        before to be weighted.

    Returns
    -------
    iba_scoring_func : callable,
        Returns the scoring metric decorated which will automatically compute
        the index balanced accuracy.

    Notes
    -----
    See :ref:`sphx_glr_auto_examples_evaluation_plot_metrics.py`.

    References
    ----------
    .. [1] García, Vicente, Javier Salvador Sánchez, and Ramón Alberto
       Mollineda. "On the effectiveness of preprocessing methods when dealing
       with different levels of class imbalance." Knowledge-Based Systems 25.1
       (2012): 13-21.

    Examples
    --------
    >>> from imblearn.metrics import geometric_mean_score as gmean
    >>> from imblearn.metrics import make_index_balanced_accuracy as iba
    >>> gmean = iba(alpha=0.1, squared=True)(gmean)
    >>> y_true = [1, 0, 0, 1, 0, 1]
    >>> y_pred = [0, 0, 1, 1, 0, 1]
    >>> print(gmean(y_true, y_pred, average=None))
    [0.44... 0.44...]
    """

    def decorate(scoring_func):
        @functools.wraps(scoring_func)
        def compute_score(*args, **kwargs):
            signature_scoring_func = signature(scoring_func)
            params_scoring_func = set(signature_scoring_func.parameters.keys())

            # IBA needs hard predictions: reject scoring functions that take
            # scores/probabilities since the dominance cannot be computed.
            # (fixed the previously garbled error message and the
            # "prohibitied" typo in the local name)
            prohibited_y_pred = {"y_score", "y_prob", "y2"}
            if prohibited_y_pred.intersection(params_scoring_func):
                raise AttributeError(
                    f"The function {scoring_func.__name__} has an unsupported"
                    f" attribute. Only metrics accepting `y_pred` are"
                    f" supported."
                )

            args_scoring_func = signature_scoring_func.bind(*args, **kwargs)
            args_scoring_func.apply_defaults()
            _score = scoring_func(*args_scoring_func.args, **args_scoring_func.kwargs)
            if squared:
                _score = np.power(_score, 2)

            # Forward only the arguments that sensitivity_specificity_support
            # understands to compute the dominance term.
            signature_sens_spec = signature(sensitivity_specificity_support)
            params_sens_spec = set(signature_sens_spec.parameters.keys())
            common_params = params_sens_spec.intersection(
                set(args_scoring_func.arguments.keys())
            )
            args_sens_spec = {k: args_scoring_func.arguments[k] for k in common_params}
            if scoring_func.__name__ == "geometric_mean_score":
                if "average" in args_sens_spec:
                    if args_sens_spec["average"] == "multiclass":
                        # "multiclass" is specific to the G-mean; map it to a
                        # value the helper understands.
                        args_sens_spec["average"] = "macro"
            elif (
                scoring_func.__name__ == "accuracy_score"
                or scoring_func.__name__ == "jaccard_score"
            ):
                # We do not support multilabel so the only average supported
                # is binary
                args_sens_spec["average"] = "binary"
            sensitivity, specificity, _ = sensitivity_specificity_support(
                **args_sens_spec
            )
            dominance = sensitivity - specificity
            return (1.0 + alpha * dominance) * _score

        return compute_score

    return decorate
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "labels": ["array-like", None],
        "target_names": ["array-like", None],
        "sample_weight": ["array-like", None],
        "digits": [Interval(numbers.Integral, 0, None, closed="left")],
        "alpha": [numbers.Real],
        "output_dict": ["boolean"],
        "zero_division": [
            StrOptions({"warn"}),
            Interval(numbers.Integral, 0, 1, closed="both"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def classification_report_imbalanced(
    y_true,
    y_pred,
    *,
    labels=None,
    target_names=None,
    sample_weight=None,
    digits=2,
    alpha=0.1,
    output_dict=False,
    zero_division="warn",
):
    """Build a classification report based on metrics used with imbalanced dataset.
    Specific metrics have been proposed to evaluate the classification
    performed on imbalanced dataset. This report compiles the
    state-of-the-art metrics: precision/recall/specificity, geometric
    mean, and index balanced accuracy of the
    geometric mean.
    Read more in the :ref:`User Guide <classification_report>`.
    Parameters
    ----------
    y_true : 1d array-like, or label indicator array / sparse matrix
        Ground truth (correct) target values.
    y_pred : 1d array-like, or label indicator array / sparse matrix
        Estimated targets as returned by a classifier.
    labels : array-like of shape (n_labels,), default=None
        Optional list of label indices to include in the report.
    target_names : list of str of shape (n_labels,), default=None
        Optional display names matching the labels (same order).
    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.
    digits : int, default=2
        Number of digits for formatting output floating point values.
        When ``output_dict`` is ``True``, this will be ignored and the
        returned values will not be rounded.
    alpha : float, default=0.1
        Weighting factor.
    output_dict : bool, default=False
        If True, return output as dict.
        .. versionadded:: 0.8
    zero_division : "warn" or {0, 1}, default="warn"
        Sets the value to return when there is a zero division. If set to
        "warn", this acts as 0, but warnings are also raised.
        .. versionadded:: 0.8
    Returns
    -------
    report : string / dict
        Text summary of the precision, recall, specificity, geometric mean,
        and index balanced accuracy.
        Dictionary returned if output_dict is True. Dictionary has the
        following structure::
            {'label 1': {'pre':0.5,
                         'rec':1.0,
                         ...
                        },
             'label 2': { ... },
              ...
            }
    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.metrics import classification_report_imbalanced
    >>> y_true = [0, 1, 2, 2, 2]
    >>> y_pred = [0, 0, 2, 2, 1]
    >>> target_names = ['class 0', 'class 1', 'class 2']
    >>> print(classification_report_imbalanced(y_true, y_pred, \
    target_names=target_names))
                       pre       rec       spe        f1       geo       iba\
       sup
    <BLANKLINE>
        class 0       0.50      1.00      0.75      0.67      0.87      0.77\
         1
        class 1       0.00      0.00      0.75      0.00      0.00      0.00\
         1
        class 2       1.00      0.67      1.00      0.80      0.82      0.64\
         3
    <BLANKLINE>
    avg / total       0.70      0.60      0.90      0.61      0.66      0.54\
         5
    <BLANKLINE>
    """
    if labels is None:
        # Default to reporting on every label observed in the data.
        labels = unique_labels(y_true, y_pred)
    else:
        labels = np.asarray(labels)

    last_line_heading = "avg / total"

    if target_names is None:
        # Fall back to the string representation of the labels themselves.
        target_names = [f"{label}" for label in labels]

    # Build the %-style row format: one left column sized to the widest class
    # name (or the footer heading), then one 9-char column per metric header.
    name_width = max(len(cn) for cn in target_names)
    width = max(name_width, len(last_line_heading), digits)
    headers = ["pre", "rec", "spe", "f1", "geo", "iba", "sup"]
    fmt = "%% %ds" % width  # first column: class name
    fmt += " "
    fmt += " ".join(["% 9s" for _ in headers])
    fmt += "\n"
    headers = [""] + headers
    report = fmt % tuple(headers)
    report += "\n"

    # Compute the different metrics
    # Precision/recall/f1
    precision, recall, f1, support = precision_recall_fscore_support(
        y_true,
        y_pred,
        labels=labels,
        average=None,
        sample_weight=sample_weight,
        zero_division=zero_division,
    )
    # Specificity
    specificity = specificity_score(
        y_true,
        y_pred,
        labels=labels,
        average=None,
        sample_weight=sample_weight,
    )
    # Geometric mean
    geo_mean = geometric_mean_score(
        y_true,
        y_pred,
        labels=labels,
        average=None,
        sample_weight=sample_weight,
    )
    # Index balanced accuracy
    iba_gmean = make_index_balanced_accuracy(alpha=alpha, squared=True)(
        geometric_mean_score
    )
    iba = iba_gmean(
        y_true,
        y_pred,
        labels=labels,
        average=None,
        sample_weight=sample_weight,
    )

    # One formatted row (and one dict entry) per label.
    report_dict = {}
    for i, label in enumerate(labels):
        report_dict_label = {}
        values = [target_names[i]]
        for score_name, score_value in zip(
            headers[1:-1],
            [
                precision[i],
                recall[i],
                specificity[i],
                f1[i],
                geo_mean[i],
                iba[i],
            ],
        ):
            values += ["{0:0.{1}f}".format(score_value, digits)]
            report_dict_label[score_name] = score_value
        values += [f"{support[i]}"]
        report_dict_label[headers[-1]] = support[i]
        report += fmt % tuple(values)
        report_dict[target_names[i]] = report_dict_label

    report += "\n"

    # compute averages (support-weighted, matching the "avg / total" footer)
    values = [last_line_heading]
    for score_name, score_value in zip(
        headers[1:-1],
        [
            np.average(precision, weights=support),
            np.average(recall, weights=support),
            np.average(specificity, weights=support),
            np.average(f1, weights=support),
            np.average(geo_mean, weights=support),
            np.average(iba, weights=support),
        ],
    ):
        values += ["{0:0.{1}f}".format(score_value, digits)]
        report_dict[f"avg_{score_name}"] = score_value
    values += [f"{np.sum(support)}"]
    report += fmt % tuple(values)
    report_dict["total_support"] = np.sum(support)

    if output_dict:
        return report_dict
    return report
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def macro_averaged_mean_absolute_error(y_true, y_pred, *, sample_weight=None):
    """Compute Macro-Averaged MAE for imbalanced ordinal classification.

    The MAE is computed separately for each class and the per-class errors are
    then averaged with equal weight per class, so minority classes count as
    much as majority ones.

    Read more in the :ref:`User Guide <macro_averaged_mean_absolute_error>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated targets as returned by a classifier.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float or ndarray of floats
        Macro-Averaged MAE output is non-negative floating point.
        The best value is 0.0.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import mean_absolute_error
    >>> from imblearn.metrics import macro_averaged_mean_absolute_error
    >>> y_true_balanced = [1, 1, 2, 2]
    >>> y_true_imbalanced = [1, 2, 2, 2]
    >>> y_pred = [1, 2, 1, 2]
    >>> mean_absolute_error(y_true_balanced, y_pred)
    0.5
    >>> mean_absolute_error(y_true_imbalanced, y_pred)
    0.25
    >>> macro_averaged_mean_absolute_error(y_true_balanced, y_pred)
    0.5
    >>> macro_averaged_mean_absolute_error(y_true_imbalanced, y_pred)
    0.16...
    """
    _, y_true, y_pred = _check_targets(y_true, y_pred)
    if sample_weight is None:
        # Uniform weighting when no explicit weights are given.
        sample_weight = np.ones(y_true.shape)
    else:
        sample_weight = column_or_1d(sample_weight)
    check_consistent_length(y_true, y_pred, sample_weight)

    # One MAE per class, restricted to the samples whose true label is that
    # class; every class then contributes equally to the final average.
    per_class_mae = []
    for label in unique_labels(y_true, y_pred):
        mask = y_true == label
        per_class_mae.append(
            mean_absolute_error(
                y_true[mask],
                y_pred[mask],
                sample_weight=sample_weight[mask],
            )
        )
    return np.sum(per_class_mae) / len(per_class_mae)
| 39,954 | 34.017528 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/__init__.py | """
The :mod:`imblearn.metrics` module includes score functions, performance
metrics and pairwise metrics and distance computations.
"""
from ._classification import (
classification_report_imbalanced,
geometric_mean_score,
macro_averaged_mean_absolute_error,
make_index_balanced_accuracy,
sensitivity_score,
sensitivity_specificity_support,
specificity_score,
)
# Names exported as the public API of :mod:`imblearn.metrics`.
__all__ = [
    "sensitivity_specificity_support",
    "sensitivity_score",
    "specificity_score",
    "geometric_mean_score",
    "make_index_balanced_accuracy",
    "classification_report_imbalanced",
    "macro_averaged_mean_absolute_error",
]
| 642 | 24.72 | 72 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/tests/test_classification.py | # coding: utf-8
"""Testing the metric for classification with imbalanced dataset"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from functools import partial
import numpy as np
import pytest
from sklearn import datasets, svm
from sklearn.metrics import (
accuracy_score,
average_precision_score,
brier_score_loss,
cohen_kappa_score,
jaccard_score,
precision_score,
recall_score,
roc_auc_score,
)
from sklearn.preprocessing import label_binarize
from sklearn.utils._testing import (
assert_allclose,
assert_array_equal,
assert_no_warnings,
)
from sklearn.utils.validation import check_random_state
from imblearn.metrics import (
classification_report_imbalanced,
geometric_mean_score,
macro_averaged_mean_absolute_error,
make_index_balanced_accuracy,
sensitivity_score,
sensitivity_specificity_support,
specificity_score,
)
RND_SEED = 42
R_TOL = 1e-2
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
    """Fit a linear SVC on a (noised) toy dataset and return predictions.

    Returns ``(y_true, y_pred, probas_pred)`` for the held-out second half of
    the shuffled data. When ``binary`` is True the problem is restricted to
    the first two classes and ``probas_pred`` holds only the positive-class
    probabilities.
    """
    if dataset is None:
        # default toy problem: iris
        dataset = datasets.load_iris()
    X, y = dataset.data, dataset.target

    if binary:
        # keep only classes 0 and 1 to obtain a binary task
        keep = y < 2
        X, y = X[keep], y[keep]

    n_samples, n_features = X.shape

    # deterministic shuffle of the samples
    permutation = np.arange(n_samples)
    shuffle_rng = check_random_state(37)
    shuffle_rng.shuffle(permutation)
    X, y = X[permutation], y[permutation]
    split = int(n_samples / 2)

    # append noisy features so the classifier cannot be perfect
    noise_rng = np.random.RandomState(0)
    X = np.c_[X, noise_rng.randn(n_samples, 200 * n_features)]

    # train on the first half, evaluate on the second half
    clf = svm.SVC(kernel="linear", probability=True, random_state=0)
    clf.fit(X[:split], y[:split])
    probas_pred = clf.predict_proba(X[split:])
    if binary:
        # keep only the probability of the positive class
        probas_pred = probas_pred[:, 1]
    y_pred = clf.predict(X[split:])
    return y[split:], y_pred, probas_pred
###############################################################################
# Tests
def test_sensitivity_specificity_score_binary():
    """Per-class and binary-averaged scores on a binary toy problem."""
    y_true, y_pred, _ = make_prediction(binary=True)

    # per-class measures
    sensitivity, specificity, support = sensitivity_specificity_support(
        y_true, y_pred, average=None
    )
    assert_allclose(sensitivity, [0.88, 0.68], rtol=R_TOL)
    assert_allclose(specificity, [0.68, 0.88], rtol=R_TOL)
    assert_array_equal(support, [25, 25])

    # scalar scores usable in a grid search: with binary averaging (the
    # default) the value reported is that of the positive class (label == 1)
    for kwargs in ({}, {"average": "binary"}):
        sens = assert_no_warnings(sensitivity_score, y_true, y_pred, **kwargs)
        assert sens == pytest.approx(0.68, rel=R_TOL)
        spec = assert_no_warnings(specificity_score, y_true, y_pred, **kwargs)
        assert spec == pytest.approx(0.88, rel=R_TOL)
@pytest.mark.filterwarnings("ignore:Specificity is ill-defined")
@pytest.mark.parametrize(
    "y_true_pred, expected_sensitivity, expected_specificity",
    [(([1, 1], [1, 1]), 1.0, 0.0), (([-1, -1], [-1, -1]), 0.0, 0.0)],
)
def test_sensitivity_specificity_f_binary_single_class(
    y_true_pred, expected_sensitivity, expected_specificity
):
    """Degenerate single-class targets, e.g. from non-stratified CV splits."""
    y_true, y_pred = y_true_pred
    assert sensitivity_score(y_true, y_pred) == expected_sensitivity
    assert specificity_score(y_true, y_pred) == expected_specificity
@pytest.mark.parametrize(
    "average, expected_specificity",
    [
        (None, [1.0, 0.67, 1.0, 1.0, 1.0]),
        ("macro", np.mean([1.0, 0.67, 1.0, 1.0, 1.0])),
        ("micro", 15 / 16),
    ],
)
def test_sensitivity_specificity_extra_labels(average, expected_specificity):
    """Labels absent from y_true/y_pred still take part in the specificity."""
    y_true = [1, 3, 3, 2]
    y_pred = [1, 1, 3, 2]
    computed = specificity_score(
        y_true, y_pred, labels=[0, 1, 2, 3, 4], average=average
    )
    assert_allclose(expected_specificity, computed, rtol=R_TOL)
def test_sensitivity_specificity_ignored_labels():
    """Restricting `labels` must change the averaged specificity scores."""
    y_true = [1, 1, 2, 3]
    y_pred = [1, 3, 3, 3]

    score_13 = partial(specificity_score, y_true, y_pred, labels=[1, 3])
    score_all = partial(specificity_score, y_true, y_pred, labels=None)

    assert_allclose([1.0, 0.33], score_13(average=None), rtol=R_TOL)
    assert_allclose(np.mean([1.0, 0.33]), score_13(average="macro"), rtol=R_TOL)
    assert_allclose(
        np.average([1.0, 0.33], weights=[2.0, 1.0]),
        score_13(average="weighted"),
        rtol=R_TOL,
    )
    assert_allclose(3.0 / (3.0 + 2.0), score_13(average="micro"), rtol=R_TOL)

    # Sanity check: the restricted scores must differ from the all-labels
    # scores, otherwise the assertions above would not be meaningful.
    for average in ("macro", "weighted", "micro"):
        assert score_13(average=average) != score_all(average=average)
def test_sensitivity_specificity_error_multilabels():
    """Multilabel (binarized) targets are rejected with a ValueError."""
    classes = np.arange(5)
    y_true_bin = label_binarize([1, 3, 3, 2], classes=classes)
    y_pred_bin = label_binarize([1, 1, 3, 2], classes=classes)
    with pytest.raises(ValueError):
        sensitivity_score(y_true_bin, y_pred_bin)
def test_sensitivity_specificity_support_errors():
    """Invalid `pos_label` or `average` values must raise a ValueError."""
    y_true, y_pred, _ = make_prediction(binary=True)

    # `pos_label` is not present in the binary targets
    with pytest.raises(ValueError):
        sensitivity_specificity_support(y_true, y_pred, pos_label=2, average="binary")

    # unknown averaging strategy
    with pytest.raises(ValueError):
        sensitivity_specificity_support([0, 1, 2], [1, 2, 0], average="mega")
def test_sensitivity_specificity_unused_pos_label():
    # `pos_label` is ignored whenever average != "binary" (even on binary
    # data); a warning should point the user to `labels=[pos_label]` instead.
    expected_msg = r"use labels=\[pos_label\] to specify a single"
    with pytest.warns(UserWarning, match=expected_msg):
        sensitivity_specificity_support(
            [1, 2, 1], [1, 2, 2], pos_label=2, average="macro"
        )
def test_geometric_mean_support_binary():
    """Smoke-check the geometric mean score on the binary toy problem."""
    y_true, y_pred, _ = make_prediction(binary=True)
    assert_allclose(geometric_mean_score(y_true, y_pred), 0.77, rtol=R_TOL)
@pytest.mark.filterwarnings("ignore:Recall is ill-defined")
@pytest.mark.parametrize(
    "y_true, y_pred, correction, expected_gmean",
    [
        # perfect prediction -> score of 1 regardless of correction
        ([0, 0, 1, 1], [0, 0, 1, 1], 0.0, 1.0),
        # all predictions wrong and no correction -> score of 0
        ([0, 0, 0, 0], [1, 1, 1, 1], 0.0, 0.0),
        ([0, 0, 0, 0], [0, 0, 0, 0], 0.001, 1.0),
        # all predictions wrong with correction -> the correction value shows
        # up directly in the expected score
        ([0, 0, 0, 0], [1, 1, 1, 1], 0.001, 0.001),
        ([0, 0, 1, 1], [0, 1, 1, 0], 0.001, 0.5),
        (
            [0, 1, 2, 0, 1, 2],
            [0, 2, 1, 0, 0, 1],
            0.001,
            (0.001**2) ** (1 / 3),
        ),
        ([0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5], 0.001, 1),
        ([0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 1], 0.001, (0.5 * 0.75) ** 0.5),
    ],
)
def test_geometric_mean_multiclass(y_true, y_pred, correction, expected_gmean):
    # Exact expected values of the geometric mean score for several targets
    # and `correction` settings (see the fixture table above).
    gmean = geometric_mean_score(y_true, y_pred, correction=correction)
    assert gmean == pytest.approx(expected_gmean, rel=R_TOL)
@pytest.mark.filterwarnings("ignore:Recall is ill-defined")
@pytest.mark.parametrize(
    "y_true, y_pred, average, expected_gmean",
    [
        # the three averaging strategies coincide on this example
        ([0, 1, 2, 0, 1, 2], [0, 2, 1, 0, 0, 1], "macro", 0.471),
        ([0, 1, 2, 0, 1, 2], [0, 2, 1, 0, 0, 1], "micro", 0.471),
        ([0, 1, 2, 0, 1, 2], [0, 2, 1, 0, 0, 1], "weighted", 0.471),
        # average=None returns one score per class
        ([0, 1, 2, 0, 1, 2], [0, 2, 1, 0, 0, 1], None, [0.8660254, 0.0, 0.0]),
    ],
)
def test_geometric_mean_average(y_true, y_pred, average, expected_gmean):
    # Check the supported `average` options of the geometric mean score.
    gmean = geometric_mean_score(y_true, y_pred, average=average)
    assert gmean == pytest.approx(expected_gmean, rel=R_TOL)
@pytest.mark.parametrize(
    "y_true, y_pred, sample_weight, average, expected_gmean",
    [
        # no weights as a baseline
        ([0, 1, 2, 0, 1, 2], [0, 1, 1, 0, 0, 1], None, "multiclass", 0.707),
        # these weights leave the "multiclass" score unchanged ...
        (
            [0, 1, 2, 0, 1, 2],
            [0, 1, 1, 0, 0, 1],
            [1, 2, 1, 1, 2, 1],
            "multiclass",
            0.707,
        ),
        # ... but change the "weighted" score
        (
            [0, 1, 2, 0, 1, 2],
            [0, 1, 1, 0, 0, 1],
            [1, 2, 1, 1, 2, 1],
            "weighted",
            0.333,
        ),
    ],
)
def test_geometric_mean_sample_weight(
    y_true, y_pred, sample_weight, average, expected_gmean
):
    # `sample_weight` support, with the score restricted to labels 0 and 1.
    gmean = geometric_mean_score(
        y_true,
        y_pred,
        labels=[0, 1],
        sample_weight=sample_weight,
        average=average,
    )
    assert gmean == pytest.approx(expected_gmean, rel=R_TOL)
@pytest.mark.parametrize(
    "average, expected_gmean",
    [
        ("multiclass", 0.41),
        (None, [0.85, 0.29, 0.7]),
        ("macro", 0.68),
        ("weighted", 0.65),
    ],
)
def test_geometric_mean_score_prediction(average, expected_gmean):
    """Check every `average` option on the multiclass toy predictions."""
    y_true, y_pred, _ = make_prediction(binary=False)
    score = geometric_mean_score(y_true, y_pred, average=average)
    assert score == pytest.approx(expected_gmean, rel=R_TOL)
def test_iba_geo_mean_binary():
    """Index balanced accuracy applied to the geometric mean (binary case)."""
    y_true, y_pred, _ = make_prediction(binary=True)
    iba_factory = make_index_balanced_accuracy(alpha=0.5, squared=True)
    balanced_gmean = iba_factory(geometric_mean_score)
    assert_allclose(balanced_gmean(y_true, y_pred), 0.5948, rtol=R_TOL)
def _format_report(report):
    """Collapse every run of whitespace in *report* into a single space."""
    tokens = report.split()
    return " ".join(tokens)
def test_classification_report_imbalanced_multiclass():
    # Whitespace-normalized text report on the iris toy predictions, with and
    # without explicit target names.
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # report rendered with the provided class names
    expected_report = (
        "pre rec spe f1 geo iba sup setosa 0.83 0.79 0.92 "
        "0.81 0.85 0.72 24 versicolor 0.33 0.10 0.86 0.15 "
        "0.29 0.08 31 virginica 0.42 0.90 0.55 0.57 0.70 "
        "0.51 20 avg / total 0.51 0.53 0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(
        y_true,
        y_pred,
        labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names,
    )
    assert _format_report(report) == expected_report
    # report rendered with automatically detected (integer) labels
    expected_report = (
        "pre rec spe f1 geo iba sup 0 0.83 0.79 0.92 0.81 "
        "0.85 0.72 24 1 0.33 0.10 0.86 0.15 0.29 0.08 31 "
        "2 0.42 0.90 0.55 0.57 0.70 0.51 20 avg / total "
        "0.51 0.53 0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(y_true, y_pred)
    assert _format_report(report) == expected_report
def test_classification_report_imbalanced_multiclass_with_digits():
    # `digits` controls the number of decimals in the rendered report.
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    # report rendered with class names and 5 decimal digits
    expected_report = (
        "pre rec spe f1 geo iba sup setosa 0.82609 0.79167 "
        "0.92157 0.80851 0.85415 0.72010 24 versicolor "
        "0.33333 0.09677 0.86364 0.15000 0.28910 0.07717 "
        "31 virginica 0.41860 0.90000 0.54545 0.57143 0.70065 "
        "0.50831 20 avg / total 0.51375 0.53333 0.79733 "
        "0.47310 0.57966 0.39788 75"
    )
    report = classification_report_imbalanced(
        y_true,
        y_pred,
        labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names,
        digits=5,
    )
    assert _format_report(report) == expected_report
    # without `digits`, the report falls back to the 2-decimal default
    expected_report = (
        "pre rec spe f1 geo iba sup 0 0.83 0.79 0.92 0.81 "
        "0.85 0.72 24 1 0.33 0.10 0.86 0.15 0.29 0.08 31 "
        "2 0.42 0.90 0.55 0.57 0.70 0.51 20 avg / total 0.51 "
        "0.53 0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(y_true, y_pred)
    assert _format_report(report) == expected_report
def test_classification_report_imbalanced_multiclass_with_string_label():
    # String class labels are reported as-is; `target_names` can remap them.
    y_true, y_pred, _ = make_prediction(binary=False)
    y_true = np.array(["blue", "green", "red"])[y_true]
    y_pred = np.array(["blue", "green", "red"])[y_pred]
    expected_report = (
        "pre rec spe f1 geo iba sup blue 0.83 0.79 0.92 0.81 "
        "0.85 0.72 24 green 0.33 0.10 0.86 0.15 0.29 0.08 31 "
        "red 0.42 0.90 0.55 0.57 0.70 0.51 20 avg / total "
        "0.51 0.53 0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(y_true, y_pred)
    assert _format_report(report) == expected_report
    # `target_names` replaces the string labels in the rendered report
    expected_report = (
        "pre rec spe f1 geo iba sup a 0.83 0.79 0.92 0.81 0.85 "
        "0.72 24 b 0.33 0.10 0.86 0.15 0.29 0.08 31 c 0.42 "
        "0.90 0.55 0.57 0.70 0.51 20 avg / total 0.51 0.53 "
        "0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(
        y_true, y_pred, target_names=["a", "b", "c"]
    )
    assert _format_report(report) == expected_report
def test_classification_report_imbalanced_multiclass_with_unicode_label():
    # Non-ASCII class labels (here with U+00A2, cent sign) must render
    # correctly in the report.
    y_true, y_pred, _ = make_prediction(binary=False)
    labels = np.array(["blue\xa2", "green\xa2", "red\xa2"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    expected_report = (
        "pre rec spe f1 geo iba sup blue¢ 0.83 0.79 0.92 0.81 "
        "0.85 0.72 24 green¢ 0.33 0.10 0.86 0.15 0.29 0.08 31 "
        "red¢ 0.42 0.90 0.55 0.57 0.70 0.51 20 avg / total "
        "0.51 0.53 0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(y_true, y_pred)
    assert _format_report(report) == expected_report
def test_classification_report_imbalanced_multiclass_with_long_string_label():
    # A very long class label must not break the report layout (whitespace
    # normalization makes the comparison width-independent).
    y_true, y_pred, _ = make_prediction(binary=False)
    labels = np.array(["blue", "green" * 5, "red"])
    y_true = labels[y_true]
    y_pred = labels[y_pred]
    expected_report = (
        "pre rec spe f1 geo iba sup blue 0.83 0.79 0.92 0.81 "
        "0.85 0.72 24 greengreengreengreengreen 0.33 0.10 "
        "0.86 0.15 0.29 0.08 31 red 0.42 0.90 0.55 0.57 0.70 "
        "0.51 20 avg / total 0.51 0.53 0.80 0.47 0.58 0.40 75"
    )
    report = classification_report_imbalanced(y_true, y_pred)
    assert _format_report(report) == expected_report
@pytest.mark.parametrize(
    "score, expected_score",
    [
        (accuracy_score, 0.54756),
        (jaccard_score, 0.33176),
        (precision_score, 0.65025),
        (recall_score, 0.41616),
    ],
)
def test_iba_sklearn_metrics(score, expected_score):
    """IBA can wrap scikit-learn metrics taking (y_true, y_pred)."""
    y_true, y_pred, _ = make_prediction(binary=True)
    iba_score = make_index_balanced_accuracy(alpha=0.5, squared=True)(score)
    assert iba_score(y_true, y_pred) == pytest.approx(expected_score)
@pytest.mark.parametrize(
    "score_loss",
    [average_precision_score, brier_score_loss, cohen_kappa_score, roc_auc_score],
)
def test_iba_error_y_score_prob_error(score_loss):
    # Metrics that consume scores/probabilities cannot be wrapped by IBA and
    # should raise an AttributeError when called.
    y_true, y_pred, _ = make_prediction(binary=True)
    wrapped = make_index_balanced_accuracy(alpha=0.5, squared=True)(score_loss)
    with pytest.raises(AttributeError):
        wrapped(y_true, y_pred)
def test_classification_report_imbalanced_dict_with_target_names():
    # With `output_dict=True`, the outer keys are the target names plus the
    # averaged metrics, and the inner keys are the metric abbreviations.
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    report = classification_report_imbalanced(
        y_true,
        y_pred,
        labels=np.arange(len(iris.target_names)),
        target_names=iris.target_names,
        output_dict=True,
    )
    outer_keys = set(report.keys())
    inner_keys = set(report["setosa"].keys())
    expected_outer_keys = {
        "setosa",
        "versicolor",
        "virginica",
        "avg_pre",
        "avg_rec",
        "avg_spe",
        "avg_f1",
        "avg_geo",
        "avg_iba",
        "total_support",
    }
    expected_inner_keys = {"spe", "f1", "sup", "rec", "geo", "iba", "pre"}
    assert outer_keys == expected_outer_keys
    assert inner_keys == expected_inner_keys
def test_classification_report_imbalanced_dict_without_target_names():
    """Dict output falls back to stringified labels when no names are given.

    Leftover debug ``print`` calls were removed: they only polluted the
    captured test output and asserted nothing.
    """
    iris = datasets.load_iris()
    y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
    report = classification_report_imbalanced(
        y_true,
        y_pred,
        labels=np.arange(len(iris.target_names)),
        output_dict=True,
    )
    # Outer keys are the string form of the integer labels plus the averages.
    outer_keys = set(report.keys())
    inner_keys = set(report["0"].keys())
    expected_outer_keys = {
        "0",
        "1",
        "2",
        "avg_pre",
        "avg_rec",
        "avg_spe",
        "avg_f1",
        "avg_geo",
        "avg_iba",
        "total_support",
    }
    expected_inner_keys = {"spe", "f1", "sup", "rec", "geo", "iba", "pre"}
    assert outer_keys == expected_outer_keys
    assert inner_keys == expected_inner_keys
@pytest.mark.parametrize(
    "y_true, y_pred, expected_ma_mae",
    [
        # balanced binary-valued targets
        ([1, 1, 1, 2, 2, 2], [1, 2, 1, 2, 1, 2], 0.333),
        # imbalanced binary-valued targets
        ([1, 1, 1, 1, 1, 2], [1, 2, 1, 2, 1, 2], 0.2),
        # balanced and imbalanced three-valued targets
        ([1, 1, 1, 2, 2, 2, 3, 3, 3], [1, 3, 1, 2, 1, 1, 2, 3, 3], 0.555),
        ([1, 1, 1, 1, 1, 1, 2, 3, 3], [1, 3, 1, 2, 1, 1, 2, 3, 3], 0.166),
    ],
)
def test_macro_averaged_mean_absolute_error(y_true, y_pred, expected_ma_mae):
    # Exact expected values of the macro-averaged MAE for balanced and
    # imbalanced targets.
    ma_mae = macro_averaged_mean_absolute_error(y_true, y_pred)
    assert ma_mae == pytest.approx(expected_ma_mae, rel=R_TOL)
def test_macro_averaged_mean_absolute_error_sample_weight():
    """Unit sample weights must reproduce the unweighted score."""
    y_true = [1, 1, 1, 2, 2, 2]
    y_pred = [1, 2, 1, 2, 1, 2]

    unweighted = macro_averaged_mean_absolute_error(y_true, y_pred)
    weighted_with_ones = macro_averaged_mean_absolute_error(
        y_true, y_pred, sample_weight=[1, 1, 1, 1, 1, 1]
    )
    assert weighted_with_ones == pytest.approx(unweighted)
| 17,909 | 31.32852 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/tests/test_score_objects.py | """Test for score"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV, train_test_split
from imblearn.metrics import (
geometric_mean_score,
make_index_balanced_accuracy,
sensitivity_score,
specificity_score,
)
R_TOL = 1e-2
@pytest.fixture
def data():
    """Deterministic train/test split of a two-cluster blob problem."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    return X_train, X_test, y_train, y_test
@pytest.mark.parametrize(
    "score, expected_score",
    [
        (sensitivity_score, 0.90),
        (specificity_score, 0.90),
        (geometric_mean_score, 0.90),
        (make_index_balanced_accuracy()(geometric_mean_score), 0.82),
    ],
)
@pytest.mark.parametrize("average", ["macro", "weighted", "micro"])
def test_scorer_common_average(data, score, expected_score, average):
    """The imblearn metrics work as grid-search scorers for every average."""
    X_train, X_test, y_train, _ = data

    search = GridSearchCV(
        LogisticRegression(),
        param_grid={"C": [1, 10]},
        scoring=make_scorer(score, pos_label=None, average=average),
        cv=3,
    )
    search.fit(X_train, y_train).predict(X_test)

    assert search.best_score_ >= expected_score
@pytest.mark.parametrize(
    "score, average, expected_score",
    [
        (sensitivity_score, "binary", 0.94),
        (specificity_score, "binary", 0.89),
        (geometric_mean_score, "multiclass", 0.90),
        (
            make_index_balanced_accuracy()(geometric_mean_score),
            "multiclass",
            0.82,
        ),
    ],
)
def test_scorer_default_average(data, score, average, expected_score):
    """Each metric works as a scorer with `pos_label=1` and its own average."""
    X_train, X_test, y_train, _ = data

    search = GridSearchCV(
        LogisticRegression(),
        param_grid={"C": [1, 10]},
        scoring=make_scorer(score, pos_label=1, average=average),
        cv=3,
    )
    search.fit(X_train, y_train).predict(X_test)

    assert search.best_score_ >= expected_score
| 2,091 | 25.481013 | 70 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/metrics/tests/test_pairwise.py | """Test for the metrics that perform pairwise distance computation."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numpy as np
import pytest
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from sklearn.utils._testing import _convert_container
from imblearn.metrics.pairwise import ValueDifferenceMetric
@pytest.fixture
def data():
    """Shuffled categorical design matrix with a binary string target."""
    rng = np.random.RandomState(0)

    first = ["A"] * 10 + ["B"] * 20 + ["C"] * 30
    second = ["A"] * 40 + ["B"] * 20
    third = ["A"] * 20 + ["B"] * 20 + ["C"] * 10 + ["D"] * 10
    X = np.array([first, second, third], dtype=object).T
    # NB: keep the RNG call order (shuffle then randint) to produce the same
    # fixture as before.
    rng.shuffle(X)

    labels = np.array(["not apple", "apple"], dtype=object)
    y = labels[rng.randint(low=0, high=2, size=X.shape[0])]
    return X, y
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("k, r", [(1, 1), (1, 2), (2, 1), (2, 2)])
@pytest.mark.parametrize("y_type", ["list", "array"])
@pytest.mark.parametrize("encode_label", [True, False])
def test_value_difference_metric(data, dtype, k, r, y_type, encode_label):
    # Basic contract of the metric, across encodings, dtypes and (k, r):
    # * the pairwise distance matrix has shape (n_samples, n_samples);
    # * `pairwise(X)` equals the explicit `pairwise(X, X)`.
    X, y = data
    y = _convert_container(y, y_type)
    if encode_label:
        y = LabelEncoder().fit_transform(y)
    encoder = OrdinalEncoder(dtype=dtype)
    X_encoded = encoder.fit_transform(X)
    vdm = ValueDifferenceMetric(k=k, r=r)
    vdm.fit(X_encoded, y)
    dist_1 = vdm.pairwise(X_encoded)
    dist_2 = vdm.pairwise(X_encoded, X_encoded)
    np.testing.assert_allclose(dist_1, dist_2)
    assert dist_1.shape == (X.shape[0], X.shape[0])
    assert dist_2.shape == (X.shape[0], X.shape[0])
@pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("k, r", [(1, 1), (1, 2), (2, 1), (2, 2)])
@pytest.mark.parametrize("y_type", ["list", "array"])
@pytest.mark.parametrize("encode_label", [True, False])
def test_value_difference_metric_property(dtype, k, r, y_type, encode_label):
    # Check the defining property of the VDM distance as described in
    # "Improved Heterogeneous Distance Functions", D.R. Wilson and
    # T.R. Martinez, Journal of Artificial Intelligence Research 6 (1997) 1-34
    # https://arxiv.org/pdf/cs/9701101.pdf
    #
    # "if an attribute color has three values red, green and blue, and the
    # application is to identify whether or not an object is an apple, red and
    # green would be considered closer than red and blue because the former two
    # both have similar correlations with the output class apple."
    # single categorical feature: 10 green, 10 red, 10 blue samples
    X = np.array(["green"] * 10 + ["red"] * 10 + ["blue"] * 10).reshape(-1, 1)
    # 0 - not an apple / 1 - an apple
    y = np.array([1] * 8 + [0] * 5 + [1] * 7 + [0] * 9 + [1])
    y_labels = np.array(["not apple", "apple"], dtype=object)
    y = y_labels[y]
    y = _convert_container(y, y_type)
    if encode_label:
        y = LabelEncoder().fit_transform(y)
    encoder = OrdinalEncoder(dtype=dtype)
    X_encoded = encoder.fit_transform(X)
    vdm = ValueDifferenceMetric(k=k, r=r)
    vdm.fit(X_encoded, y)
    sample_green = encoder.transform([["green"]])
    sample_red = encoder.transform([["red"]])
    sample_blue = encoder.transform([["blue"]])
    for sample in (sample_green, sample_red, sample_blue):
        # the distance between two samples of the same category must be null
        dist = vdm.pairwise(sample).squeeze()
        assert dist == pytest.approx(0)
    # check the property quoted above from the reference paper
    dist_1 = vdm.pairwise(sample_green, sample_red).squeeze()
    dist_2 = vdm.pairwise(sample_blue, sample_red).squeeze()
    dist_3 = vdm.pairwise(sample_blue, sample_green).squeeze()
    # green and red are very close;
    # blue is closer to red than to green
    assert dist_1 < dist_2
    assert dist_1 < dist_3
    assert dist_2 < dist_3
def test_value_difference_metric_categories(data):
    """The "auto" mode must match explicitly provided category counts."""
    X, y = data
    encoder = OrdinalEncoder(dtype=np.int32)
    X_encoded = encoder.fit_transform(X)
    counts = np.array([len(categories) for categories in encoder.categories_])

    vdm_auto = ValueDifferenceMetric().fit(X_encoded, y)
    vdm_explicit = ValueDifferenceMetric(n_categories=counts).fit(X_encoded, y)

    np.testing.assert_array_equal(vdm_auto.n_categories_, counts)
    np.testing.assert_array_equal(vdm_auto.n_categories_, vdm_explicit.n_categories_)
def test_value_difference_metric_categories_error(data):
    """An inconsistent `n_categories` length must raise a ValueError at fit."""
    X, y = data
    X_encoded = OrdinalEncoder(dtype=np.int32).fit_transform(X)

    # X has 3 features, so 2 category counts cannot be consistent
    vdm = ValueDifferenceMetric(n_categories=[1, 2])
    err_msg = "The length of n_categories is not consistent with the number"
    with pytest.raises(ValueError, match=err_msg):
        vdm.fit(X_encoded, y)
def test_value_difference_metric_missing_categories(data):
    # A category index absent from the data must not break fitting when the
    # category counts are provided explicitly.
    X, y = data
    encoder = OrdinalEncoder(dtype=np.int32)
    X_encoded = encoder.fit_transform(X)
    counts = np.array([len(categories) for categories in encoder.categories_])

    # Overwrite the rows whose last feature equals 1 so that only categories
    # {0, 2, 3} remain observed in that column.
    X_encoded[X_encoded[:, -1] == 1] = 0
    np.testing.assert_array_equal(np.unique(X_encoded[:, -1]), [0, 2, 3])

    vdm = ValueDifferenceMetric(n_categories=counts).fit(X_encoded, y)
    n_classes = len(np.unique(y))
    for n_cats, proba in zip(counts, vdm.proba_per_class_):
        assert proba.shape == (n_cats, n_classes)
def test_value_difference_value_unfitted(data):
    """Calling `pairwise` before `fit` must raise a NotFittedError."""
    X, _ = data
    X_encoded = OrdinalEncoder(dtype=np.int32).fit_transform(X)
    with pytest.raises(NotFittedError):
        ValueDifferenceMetric().pairwise(X_encoded)
| 6,395 | 35.971098 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/datasets/_imbalance.py | """Transform a dataset into an imbalanced dataset."""
# Authors: Dayvid Oliveira
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
from collections.abc import Mapping
from ..under_sampling import RandomUnderSampler
from ..utils import check_sampling_strategy
from ..utils._param_validation import validate_params
@validate_params(
    {
        "X": ["array-like"],
        "y": ["array-like"],
        "sampling_strategy": [Mapping, callable, None],
        "random_state": ["random_state"],
        "verbose": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def make_imbalance(
    X, y, *, sampling_strategy=None, random_state=None, verbose=False, **kwargs
):
    """Turn a dataset into an imbalanced dataset with a specific sampling strategy.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <make_imbalanced>`.

    Parameters
    ----------
    X : {array-like, dataframe} of shape (n_samples, n_features)
        Matrix containing the data to be imbalanced.

    y : array-like of shape (n_samples,)
        Corresponding label for each sample in X.

    sampling_strategy : dict or callable, default=None
        Ratio to use for resampling the data set.

        - When ``dict``, the keys correspond to the targeted classes. The
          values correspond to the desired number of samples for each targeted
          class.

        - When callable, function taking ``y`` and returns a ``dict``. The keys
          correspond to the targeted classes. The values correspond to the
          desired number of samples for each class.

    random_state : int, RandomState instance or None, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random.

    verbose : bool, default=False
        Show information regarding the sampling.

    **kwargs : dict
        Dictionary of additional keyword arguments to pass to
        ``sampling_strategy``.

    Returns
    -------
    X_resampled : {ndarray, dataframe} of shape (n_samples_new, n_features)
        The array containing the imbalanced data.

    y_resampled : ndarray of shape (n_samples_new)
        The corresponding label of `X_resampled`.

    Raises
    ------
    ValueError
        If `sampling_strategy` is neither a mapping nor a callable.

    Notes
    -----
    See
    :ref:`sphx_glr_auto_examples_applications_plot_multi_class_under_sampling.py`,
    :ref:`sphx_glr_auto_examples_datasets_plot_make_imbalance.py`, and
    :ref:`sphx_glr_auto_examples_api_plot_sampling_strategy_usage.py`.

    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import load_iris
    >>> from imblearn.datasets import make_imbalance
    >>> data = load_iris()
    >>> X, y = data.data, data.target
    >>> print(f'Distribution before imbalancing: {Counter(y)}')
    Distribution before imbalancing: Counter({0: 50, 1: 50, 2: 50})
    >>> X_res, y_res = make_imbalance(X, y,
    ...                               sampling_strategy={0: 10, 1: 20, 2: 30},
    ...                               random_state=42)
    >>> print(f'Distribution after imbalancing: {Counter(y_res)}')
    Distribution after imbalancing: Counter({2: 30, 1: 20, 0: 10})
    """
    target_stats = Counter(y)
    # `validate_params` accepts None for `sampling_strategy`, but resampling
    # requires an explicit strategy: fail early with an informative error
    # instead of an UnboundLocalError further down.
    if isinstance(sampling_strategy, Mapping) or callable(sampling_strategy):
        sampling_strategy_ = check_sampling_strategy(
            sampling_strategy, y, "under-sampling", **kwargs
        )
    else:
        raise ValueError(
            "'sampling_strategy' has to be a dictionary or a function "
            f"returning a dictionary. Got {sampling_strategy!r} instead."
        )
    if verbose:
        print(f"The original target distribution in the dataset is: {target_stats}")
    rus = RandomUnderSampler(
        sampling_strategy=sampling_strategy_,
        replacement=False,
        random_state=random_state,
    )
    X_resampled, y_resampled = rus.fit_resample(X, y)
    if verbose:
        print(f"Make the dataset imbalanced: {Counter(y_resampled)}")
    return X_resampled, y_resampled
| 4,120 | 33.923729 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/datasets/_zenodo.py | """Collection of imbalanced datasets.
This collection of datasets has been proposed in [1]_. The
characteristics of the available datasets are presented in the table
below.
ID Name Repository & Target Ratio #S #F
1 ecoli UCI, target: imU 8.6:1 336 7
2 optical_digits UCI, target: 8 9.1:1 5,620 64
3 satimage UCI, target: 4 9.3:1 6,435 36
4 pen_digits UCI, target: 5 9.4:1 10,992 16
5 abalone UCI, target: 7 9.7:1 4,177 10
6 sick_euthyroid UCI, target: sick euthyroid 9.8:1 3,163 42
7 spectrometer UCI, target: >=44 11:1 531 93
8 car_eval_34 UCI, target: good, v good 12:1 1,728 21
9 isolet UCI, target: A, B 12:1 7,797 617
10 us_crime UCI, target: >0.65 12:1 1,994 100
11 yeast_ml8 LIBSVM, target: 8 13:1 2,417 103
12 scene LIBSVM, target: >one label 13:1 2,407 294
13 libras_move UCI, target: 1 14:1 360 90
14 thyroid_sick UCI, target: sick 15:1 3,772 52
15 coil_2000 KDD, CoIL, target: minority 16:1 9,822 85
16 arrhythmia UCI, target: 06 17:1 452 278
17 solar_flare_m0 UCI, target: M->0 19:1 1,389 32
18 oil UCI, target: minority 22:1 937 49
19 car_eval_4 UCI, target: vgood 26:1 1,728 21
20 wine_quality UCI, wine, target: <=4 26:1 4,898 11
21 letter_img UCI, target: Z 26:1 20,000 16
22 yeast_me2 UCI, target: ME2 28:1 1,484 8
23 webpage LIBSVM, w7a, target: minority 33:1 34,780 300
24 ozone_level UCI, ozone, data 34:1 2,536 72
25 mammography UCI, target: minority 42:1 11,183 6
26 protein_homo KDD CUP 2004, minority 111:1 145,751 74
27 abalone_19 UCI, target: 19 130:1 4,177 10
References
----------
.. [1] Ding, Zejin, "Diversified Ensemble Classifiers for Highly
Imbalanced Data Learning and their Application in Bioinformatics."
Dissertation, Georgia State University, (2011).
"""
# Author: Guillaume Lemaitre
# License: BSD 3 clause
import tarfile
from collections import OrderedDict
from io import BytesIO
from os import makedirs
from os.path import isfile, join
from urllib.request import urlopen
import numpy as np
from sklearn.datasets import get_data_home
from sklearn.utils import Bunch, check_random_state
from ..utils._param_validation import validate_params
# Location of the pre-processed benchmark archive hosted on Zenodo.
URL = "https://zenodo.org/record/61452/files/benchmark-imbalanced-learn.tar.gz"
# The dataset with ID ``i`` is stored in the file ``x<i>data.npz``.
PRE_FILENAME = "x"
POST_FILENAME = "data.npz"

# Dataset names in the order of their 1-based Zenodo IDs.
MAP_NAME_ID_KEYS = [
    "ecoli",
    "optical_digits",
    "satimage",
    "pen_digits",
    "abalone",
    "sick_euthyroid",
    "spectrometer",
    "car_eval_34",
    "isolet",
    "us_crime",
    "yeast_ml8",
    "scene",
    "libras_move",
    "thyroid_sick",
    "coil_2000",
    "arrhythmia",
    "solar_flare_m0",
    "oil",
    "car_eval_4",
    "wine_quality",
    "letter_img",
    "yeast_me2",
    "webpage",
    "ozone_level",
    "mammography",
    "protein_homo",
    "abalone_19",
]

# Name -> ID and ID -> name lookup tables (insertion-ordered).
MAP_NAME_ID = OrderedDict(
    (name, idx) for idx, name in enumerate(MAP_NAME_ID_KEYS, start=1)
)
MAP_ID_NAME = OrderedDict(
    (idx, name) for idx, name in enumerate(MAP_NAME_ID_KEYS, start=1)
)
@validate_params(
    {
        "data_home": [None, str],
        "filter_data": [None, tuple],
        "download_if_missing": ["boolean"],
        "random_state": ["random_state"],
        "shuffle": ["boolean"],
        "verbose": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def fetch_datasets(
    *,
    data_home=None,
    filter_data=None,
    download_if_missing=True,
    random_state=None,
    shuffle=False,
    verbose=False,
):
    """Load the benchmark datasets from Zenodo, downloading it if necessary.

    .. versionadded:: 0.3

    Parameters
    ----------
    data_home : str, default=None
        Specify another download and cache folder for the datasets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.

    filter_data : tuple of str/int, default=None
        A tuple containing the ID or the name of the datasets to be returned.
        Refer to the above table to get the ID and name of the datasets.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, default=None
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    verbose : bool, default=False
        Show information regarding the fetching.

    Returns
    -------
    datasets : OrderedDict of Bunch object,
        The ordered is defined by ``filter_data``. Each Bunch object ---
        referred as dataset --- have the following attributes:

    dataset.data : ndarray of shape (n_samples, n_features)

    dataset.target : ndarray of shape (n_samples,)

    dataset.DESCR : str
        Description of the each dataset.

    Notes
    -----
    This collection of datasets have been proposed in [1]_. The
    characteristics of the available datasets are presented in the table
    below.

    +--+--------------+-------------------------------+-------+---------+-----+
    |ID|Name          | Repository & Target           | Ratio | #S      | #F  |
    +==+==============+===============================+=======+=========+=====+
    |1 |ecoli         | UCI, target: imU              | 8.6:1 | 336     | 7   |
    +--+--------------+-------------------------------+-------+---------+-----+
    |2 |optical_digits| UCI, target: 8                | 9.1:1 | 5,620   | 64  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |3 |satimage      | UCI, target: 4                | 9.3:1 | 6,435   | 36  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |4 |pen_digits    | UCI, target: 5                | 9.4:1 | 10,992  | 16  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |5 |abalone       | UCI, target: 7                | 9.7:1 | 4,177   | 10  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |6 |sick_euthyroid| UCI, target: sick euthyroid   | 9.8:1 | 3,163   | 42  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |7 |spectrometer  | UCI, target: >=44             | 11:1  | 531     | 93  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |8 |car_eval_34   | UCI, target: good, v good     | 12:1  | 1,728   | 21  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |9 |isolet        | UCI, target: A, B             | 12:1  | 7,797   | 617 |
    +--+--------------+-------------------------------+-------+---------+-----+
    |10|us_crime      | UCI, target: >0.65            | 12:1  | 1,994   | 100 |
    +--+--------------+-------------------------------+-------+---------+-----+
    |11|yeast_ml8     | LIBSVM, target: 8             | 13:1  | 2,417   | 103 |
    +--+--------------+-------------------------------+-------+---------+-----+
    |12|scene         | LIBSVM, target: >one label    | 13:1  | 2,407   | 294 |
    +--+--------------+-------------------------------+-------+---------+-----+
    |13|libras_move   | UCI, target: 1                | 14:1  | 360     | 90  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |14|thyroid_sick  | UCI, target: sick             | 15:1  | 3,772   | 52  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |15|coil_2000     | KDD, CoIL, target: minority   | 16:1  | 9,822   | 85  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |16|arrhythmia    | UCI, target: 06               | 17:1  | 452     | 278 |
    +--+--------------+-------------------------------+-------+---------+-----+
    |17|solar_flare_m0| UCI, target: M->0             | 19:1  | 1,389   | 32  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |18|oil           | UCI, target: minority         | 22:1  | 937     | 49  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |19|car_eval_4    | UCI, target: vgood            | 26:1  | 1,728   | 21  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |20|wine_quality  | UCI, wine, target: <=4        | 26:1  | 4,898   | 11  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |21|letter_img    | UCI, target: Z                | 26:1  | 20,000  | 16  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |22|yeast_me2     | UCI, target: ME2              | 28:1  | 1,484   | 8   |
    +--+--------------+-------------------------------+-------+---------+-----+
    |23|webpage       | LIBSVM, w7a, target: minority | 33:1  | 34,780  | 300 |
    +--+--------------+-------------------------------+-------+---------+-----+
    |24|ozone_level   | UCI, ozone, data              | 34:1  | 2,536   | 72  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |25|mammography   | UCI, target: minority         | 42:1  | 11,183  | 6   |
    +--+--------------+-------------------------------+-------+---------+-----+
    |26|protein_homo  | KDD CUP 2004, minority        | 111:1 | 145,751 | 74  |
    +--+--------------+-------------------------------+-------+---------+-----+
    |27|abalone_19    | UCI, target: 19               | 130:1 | 4,177   | 10  |
    +--+--------------+-------------------------------+-------+---------+-----+

    References
    ----------
    .. [1] Ding, Zejin, "Diversified Ensemble Classifiers for Highly
       Imbalanced Data Learning and their Application in Bioinformatics."
       Dissertation, Georgia State University, (2011).
    """
    data_home = get_data_home(data_home=data_home)
    zenodo_dir = join(data_home, "zenodo")
    datasets = OrderedDict()

    # Normalize `filter_data` into a list of dataset names.
    if filter_data is None:
        filter_data_ = MAP_NAME_ID.keys()
    else:
        list_data = MAP_NAME_ID.keys()
        filter_data_ = []
        for it in filter_data:
            if isinstance(it, str):
                if it not in list_data:
                    raise ValueError(
                        f"{it} is not a dataset available. "
                        f"The available datasets are {list_data}"
                    )
                else:
                    filter_data_.append(it)
            elif isinstance(it, int):
                # Valid IDs are the 1-based keys of MAP_ID_NAME; derive the
                # bound from the mapping instead of hard-coding it so that
                # adding a dataset keeps this check correct.
                if it not in MAP_ID_NAME:
                    raise ValueError(
                        f"The dataset with the ID={it} is not an "
                        f"available dataset. The IDs are "
                        f"{range(1, len(MAP_ID_NAME) + 1)}"
                    )
                else:
                    filter_data_.append(MAP_ID_NAME[it])
            else:
                raise ValueError(
                    f"The value in the tuple should be str or int."
                    f" Got {type(it)} instead."
                )

    # go through the list and check if the data are available
    for it in filter_data_:
        filename = PRE_FILENAME + str(MAP_NAME_ID[it]) + POST_FILENAME
        filename = join(zenodo_dir, filename)
        available = isfile(filename)

        if download_if_missing and not available:
            makedirs(zenodo_dir, exist_ok=True)
            if verbose:
                print(f"Downloading {URL}")
            f = BytesIO(urlopen(URL).read())
            tar = tarfile.open(fileobj=f)
            # NOTE(review): `extractall` trusts the member paths inside the
            # archive. The Zenodo archive is a fixed, trusted source, but
            # consider `filter="data"` (Python 3.12+) to harden extraction.
            tar.extractall(path=zenodo_dir)
        elif not download_if_missing and not available:
            raise IOError("Data not found and `download_if_missing` is False")

        data = np.load(filename)
        X, y = data["data"], data["label"]

        if shuffle:
            ind = np.arange(X.shape[0])
            rng = check_random_state(random_state)
            rng.shuffle(ind)
            X = X[ind]
            y = y[ind]

        datasets[it] = Bunch(data=X, target=y, DESCR=it)

    return datasets
| 12,971 | 42.384615 | 79 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/datasets/__init__.py | """
The :mod:`imblearn.datasets` provides methods to generate
imbalanced data.
"""
from ._imbalance import make_imbalance
from ._zenodo import fetch_datasets
__all__ = ["make_imbalance", "fetch_datasets"]
| 207 | 19.8 | 57 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/datasets/tests/test_imbalance.py | """Test the module easy ensemble."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
import numpy as np
import pytest
from sklearn.datasets import load_iris
from imblearn.datasets import make_imbalance
@pytest.fixture
def iris():
    """Provide the iris dataset as an ``(X, y)`` tuple for the tests below."""
    data, target = load_iris(return_X_y=True)
    return data, target
@pytest.mark.parametrize(
    "sampling_strategy, err_msg",
    [
        ({0: -100, 1: 50, 2: 50}, "in a class cannot be negative"),
        ({0: 10, 1: 70}, "should be less or equal to the original"),
    ],
)
def test_make_imbalance_error(iris, sampling_strategy, err_msg):
    """An invalid sampling strategy must raise an informative ValueError.

    This duplicates part of utils.check_sampling_strategy, which is not
    covered by the common tests.
    """
    data, target = iris
    with pytest.raises(ValueError, match=err_msg):
        make_imbalance(data, target, sampling_strategy=sampling_strategy)
def test_make_imbalance_error_single_class(iris):
    """make_imbalance must refuse a target containing a single class."""
    data, target = iris
    degenerate_target = np.zeros_like(target)
    with pytest.raises(ValueError, match="needs to have more than 1 class."):
        make_imbalance(data, degenerate_target, sampling_strategy={0: 10})
@pytest.mark.parametrize(
    "sampling_strategy, expected_counts",
    [
        ({0: 10, 1: 20, 2: 30}, {0: 10, 1: 20, 2: 30}),
        ({0: 10, 1: 20}, {0: 10, 1: 20, 2: 50}),
    ],
)
def test_make_imbalance_dict(iris, sampling_strategy, expected_counts):
    """Classes named in the strategy are downsampled; unnamed ones are kept."""
    data, target = iris
    _, resampled_target = make_imbalance(
        data, target, sampling_strategy=sampling_strategy
    )
    assert Counter(resampled_target) == expected_counts
@pytest.mark.parametrize("as_frame", [True, False], ids=["dataframe", "array"])
@pytest.mark.parametrize(
"sampling_strategy, expected_counts",
[
(
{"setosa": 10, "versicolor": 20, "virginica": 30},
{"setosa": 10, "versicolor": 20, "virginica": 30},
),
(
{"setosa": 10, "versicolor": 20},
{"setosa": 10, "versicolor": 20, "virginica": 50},
),
],
)
def test_make_imbalanced_iris(as_frame, sampling_strategy, expected_counts):
pd = pytest.importorskip("pandas")
iris = load_iris(as_frame=as_frame)
X, y = iris.data, iris.target
y = iris.target_names[iris.target]
if as_frame:
y = pd.Series(iris.target_names[iris.target], name="target")
X_res, y_res = make_imbalance(X, y, sampling_strategy=sampling_strategy)
if as_frame:
assert hasattr(X_res, "loc")
pd.testing.assert_index_equal(X_res.index, y_res.index)
assert Counter(y_res) == expected_counts
| 2,518 | 30.098765 | 79 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/datasets/tests/test_zenodo.py | """Test the datasets loader.
Skipped if datasets is not already downloaded to data_home.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
from sklearn.utils._testing import SkipTest
from imblearn.datasets import fetch_datasets
# Expected (n_samples, n_features) shape of each dataset in the Zenodo
# imbalanced benchmark collection, keyed by dataset name. Used below to
# validate the fetched data.
DATASET_SHAPE = {
    "ecoli": (336, 7),
    "optical_digits": (5620, 64),
    "satimage": (6435, 36),
    "pen_digits": (10992, 16),
    "abalone": (4177, 10),
    "sick_euthyroid": (3163, 42),
    "spectrometer": (531, 93),
    "car_eval_34": (1728, 21),
    "isolet": (7797, 617),
    "us_crime": (1994, 100),
    "yeast_ml8": (2417, 103),
    "scene": (2407, 294),
    "libras_move": (360, 90),
    "thyroid_sick": (3772, 52),
    "coil_2000": (9822, 85),
    "arrhythmia": (452, 278),
    "solar_flare_m0": (1389, 32),
    "oil": (937, 49),
    "car_eval_4": (1728, 21),
    "wine_quality": (4898, 11),
    "letter_img": (20000, 16),
    "yeast_me2": (1484, 8),
    "webpage": (34780, 300),
    "ozone_level": (2536, 72),
    "mammography": (11183, 6),
    "protein_homo": (145751, 74),
    "abalone_19": (4177, 10),
}
def fetch(*args, **kwargs):
    """Call ``fetch_datasets`` with ``download_if_missing`` forced to True."""
    return fetch_datasets(*args, download_if_missing=True, **kwargs)
@pytest.mark.xfail
def test_fetch():
    """Every dataset must match the reference shapes, whatever the seed."""
    try:
        datasets1 = fetch(shuffle=True, random_state=42)
    except IOError:
        raise SkipTest("Zenodo dataset can not be loaded.")
    datasets2 = fetch(shuffle=True, random_state=37)
    for name, shape in DATASET_SHAPE.items():
        X1, X2 = datasets1[name].data, datasets2[name].data
        # shuffling permutes rows only, so both fetches keep the shape
        assert shape == X1.shape
        assert X1.shape == X2.shape
        y1, y2 = datasets1[name].target, datasets2[name].target
        assert (X1.shape[0],) == y1.shape
        assert (X1.shape[0],) == y2.shape
def test_fetch_filter():
    """Filtering by numeric ID and by name must load the same dataset."""
    try:
        by_id = fetch(filter_data=tuple([1]), shuffle=True, random_state=42)
    except IOError:
        raise SkipTest("Zenodo dataset can not be loaded.")
    by_name = fetch(filter_data=tuple(["ecoli"]), shuffle=True, random_state=37)
    X1, X2 = by_id["ecoli"].data, by_name["ecoli"].data
    assert DATASET_SHAPE["ecoli"] == X1.shape
    assert X1.shape == X2.shape
    # shuffling only permutes rows, so the total sum is seed-independent
    assert X1.sum() == pytest.approx(X2.sum())
    y1, y2 = by_id["ecoli"].target, by_name["ecoli"].target
    assert (X1.shape[0],) == y1.shape
    assert (X1.shape[0],) == y2.shape
@pytest.mark.parametrize(
    "filter_data, err_msg",
    [
        (("rnf",), "is not a dataset available"),
        ((-1,), "dataset with the ID="),
        ((100,), "dataset with the ID="),
        ((1.00,), "value in the tuple"),
    ],
)
def test_fetch_error(filter_data, err_msg):
    """An unknown name, out-of-range ID, or wrong type must raise ValueError."""
    with pytest.raises(ValueError, match=err_msg):
        fetch_datasets(filter_data=filter_data)
| 2,773 | 27.020202 | 82 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/datasets/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/test_pipeline.py | """
Test the pipeline module.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import itertools
import re
import shutil
import time
from tempfile import mkdtemp
import numpy as np
import pytest
import sklearn
from joblib import Memory
from pytest import raises
from sklearn.base import BaseEstimator, clone
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris, make_classification
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import LocalOutlierFactor
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import parse_version
from imblearn.datasets import make_imbalance
from imblearn.pipeline import Pipeline, make_pipeline
from imblearn.under_sampling import EditedNearestNeighbours as ENN
from imblearn.under_sampling import RandomUnderSampler
from imblearn.utils.estimator_checks import check_param_validation
sklearn_version = parse_version(sklearn.__version__)
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
R_TOL = 1e-4
class NoFit:
    """Mock estimator with no ``fit`` method, used to check that Pipeline
    rejects steps that cannot be fitted. ``a`` and ``b`` are dummy
    hyper-parameters exercised by the get/set_params tests."""

    def __init__(self, a=None, b=None):
        self.a = a
        self.b = b
class NoTrans(NoFit):
    """Mock estimator implementing ``fit`` but no ``transform``."""

    def fit(self, X, y):
        return self

    def get_params(self, deep=False):
        return {"a": self.a, "b": self.b}

    def set_params(self, **params):
        # Deliberately only honours "a" — sufficient for the parameter
        # dispatching tests that use this mock.
        self.a = params["a"]
        return self
class NoInvTransf(NoTrans):
    """Identity transformer without ``inverse_transform``."""

    def transform(self, X, y=None):
        return X
class Transf(NoInvTransf):
    """Identity transformer with both ``transform`` and ``inverse_transform``."""

    def transform(self, X, y=None):
        return X

    def inverse_transform(self, X):
        return X
class TransfFitParams(Transf):
    """Identity transformer that records the fit parameters it receives."""

    def fit(self, X, y, **fit_params):
        # keep the kwargs so tests can assert fit-param routing
        self.fit_params = fit_params
        return self
class Mult(BaseEstimator):
    """Toy estimator that multiplies its input by a constant factor.

    It acts both as a transformer (``transform``/``inverse_transform``) and
    as a predictor; all prediction-like methods share one implementation.
    """

    def __init__(self, mult=1):
        self.mult = mult

    def fit(self, X, y):
        # stateless: fitting is a no-op
        return self

    def transform(self, X):
        return self.mult * np.asarray(X)

    def inverse_transform(self, X):
        return np.asarray(X) / self.mult

    def predict(self, X):
        scaled = np.asarray(X) * self.mult
        return scaled.sum(axis=1)

    # every prediction-flavoured method behaves identically
    predict_proba = predict_log_proba = decision_function = predict

    def score(self, X, y=None):
        return np.sum(X)
class FitParamT(BaseEstimator):
    """Mock classifier that records whether ``fit`` received its flag."""

    def __init__(self):
        self.successful = False

    def fit(self, X, y, should_succeed=False):
        # remember the keyword so the tests can check fit-param routing
        self.successful = should_succeed

    def predict(self, X):
        return self.successful

    def fit_predict(self, X, y, should_succeed=False):
        self.fit(X, y, should_succeed=should_succeed)
        return self.predict(X)

    def score(self, X, y=None, sample_weight=None):
        weighted = X if sample_weight is None else X * sample_weight
        return np.sum(weighted)
class DummyTransf(Transf):
    """Identity transformer that stores the column means and a fit timestamp."""

    def fit(self, X, y):
        self.means_ = np.mean(X, axis=0)
        # store timestamp to figure out whether the result of 'fit' has been
        # cached or not
        self.timestamp_ = time.time()
        return self
class DummyEstimatorParams(BaseEstimator):
    """Mock classifier that takes params on predict"""

    def fit(self, X, y):
        return self

    def predict(self, X, got_attribute=False):
        # record the keyword so tests can assert predict-param routing;
        # returns ``self`` (not predictions) on purpose
        self.got_attribute = got_attribute
        return self
class DummySampler(NoTrans):
    """Sampler that returns the data unchanged while recording fit statistics."""

    def fit_resample(self, X, y):
        self.means_ = np.mean(X, axis=0)
        # store timestamp to figure out whether the result of 'fit' has been
        # cached or not
        self.timestamp_ = time.time()
        return X, y
class FitTransformSample(NoTrans):
    """Estimator exposing both ``transform`` and ``fit_resample``.

    Pipeline must reject such an ambiguous step with a TypeError.
    """

    def fit(self, X, y, should_succeed=False):
        # NOTE: returns None, so ``fit_transform`` below would fail if it
        # were ever called; Pipeline is expected to reject this step first.
        pass

    def fit_resample(self, X, y=None):
        return X, y

    def fit_transform(self, X, y=None):
        return self.fit(X, y).transform(X)

    def transform(self, X, y=None):
        return X
def test_pipeline_init_tuple():
    """Pipeline should accept its steps as a tuple instead of a list."""
    data = np.array([[1, 2]])
    steps = (("transf", Transf()), ("clf", FitParamT()))
    pipe = Pipeline(steps)
    pipe.fit(data, y=None)
    pipe.score(data)
    # replacing a step by "passthrough" must keep the pipeline usable
    pipe.set_params(transf="passthrough")
    pipe.fit(data, y=None)
    pipe.score(data)
def test_pipeline_init():
# Test the various init parameters of the pipeline.
with raises(TypeError):
Pipeline()
# Check that we can't instantiate pipelines with objects without fit
# method
X, y = load_iris(return_X_y=True)
error_regex = (
"Last step of Pipeline should implement fit or be the string 'passthrough'"
)
with raises(TypeError, match=error_regex):
model = Pipeline([("clf", NoFit())])
model.fit(X, y)
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([("svc", clf)])
expected = dict(svc__a=None, svc__b=None, svc=clf, **pipe.get_params(deep=False))
assert pipe.get_params(deep=True) == expected
# Check that params are set
pipe.set_params(svc__a=0.1)
assert clf.a == 0.1
assert clf.b is None
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC(gamma="scale")
filter1 = SelectKBest(f_classif)
pipe = Pipeline([("anova", filter1), ("svc", clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
error_regex = "implement fit and transform or fit_resample"
with raises(TypeError, match=error_regex):
model = Pipeline([("t", NoTrans()), ("svc", clf)])
model.fit(X, y)
# Check that params are set
pipe.set_params(svc__C=0.1)
assert clf.C == 0.1
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
with raises(ValueError):
pipe.set_params(anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert pipe.named_steps["svc"] is not pipe2.named_steps["svc"]
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop("svc")
params.pop("anova")
params2.pop("svc")
params2.pop("anova")
assert params == params2
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression(solver="lbfgs", multi_class="auto")
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([("anova", filter1), ("logistic", clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
    """Fit params prefixed with a step name must reach that step only."""
    pipeline = Pipeline([("transf", Transf()), ("clf", FitParamT())])
    pipeline.fit(X=None, y=None, clf__should_succeed=True)
    # the flag routed to the classifier makes predict() return True
    assert pipeline.predict(None)
    # the transformer's own parameters are untouched
    transf = pipeline.named_steps["transf"]
    assert transf.a is None
    assert transf.b is None
    # an unknown fit param must bubble up as a TypeError
    with raises(TypeError, match="unexpected keyword argument"):
        pipeline.fit(None, None, clf__bad=True)
def test_pipeline_sample_weight_supported():
    """``score`` must forward ``sample_weight`` to the final estimator."""
    data = np.array([[1, 2]])
    pipeline = Pipeline([("transf", Transf()), ("clf", FitParamT())])
    pipeline.fit(data, y=None)
    # FitParamT.score sums the (optionally weighted) input: 1 + 2 = 3
    assert pipeline.score(data) == 3
    assert pipeline.score(data, y=None) == 3
    assert pipeline.score(data, y=None, sample_weight=None) == 3
    # weighted: 1 * 2 + 2 * 3 = 8
    assert pipeline.score(data, sample_weight=np.array([2, 3])) == 8
def test_pipeline_sample_weight_unsupported():
    """A None sample_weight is dropped; a real one fails on Mult.score."""
    data = np.array([[1, 2]])
    pipeline = Pipeline([("transf", Transf()), ("clf", Mult())])
    pipeline.fit(data, y=None)
    # Mult.score sums the input: 1 + 2 = 3
    assert pipeline.score(data) == 3
    assert pipeline.score(data, sample_weight=None) == 3
    # Mult.score does not accept sample_weight
    with raises(TypeError, match="unexpected keyword argument"):
        pipeline.score(data, sample_weight=np.array([2, 3]))
def test_pipeline_raise_set_params_error():
    """set_params with an unknown key must raise a helpful ValueError."""
    pipe = Pipeline([("cls", LinearRegression())])
    # unknown top-level parameter
    with raises(ValueError, match="Invalid parameter"):
        pipe.set_params(fake="nope")
    # unknown nested parameter
    with raises(ValueError, match="Invalid parameter"):
        pipe.set_params(fake__estimator="nope")
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(gamma="scale", probability=True, random_state=0)
pca = PCA(svd_solver="full", n_components="mle", whiten=True)
pipe = Pipeline([("pca", pca), ("svc", clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver="randomized", whiten=True)
clf = SVC(
gamma="scale",
probability=True,
random_state=0,
decision_function_shape="ovr",
)
for preprocessing in [scaler, pca]:
pipe = Pipeline([("preprocess", preprocessing), ("svc", clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert predict.shape == (n_samples,)
proba = pipe.predict_proba(X)
assert proba.shape == (n_samples, n_classes)
log_proba = pipe.predict_log_proba(X)
assert log_proba.shape == (n_samples, n_classes)
decision_function = pipe.decision_function(X)
assert decision_function.shape == (n_samples, n_classes)
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0, n_init=10)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0, n_init=10)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([("scaler", scaler_for_pipeline), ("Kmeans", km_for_pipeline)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
    """A pipeline whose final step lacks fit_predict must not expose it."""
    pipe = Pipeline(
        [("scaler", StandardScaler()), ("pca", PCA(svd_solver="full"))]
    )
    error_regex = "'PCA' object has no attribute 'fit_predict'"
    with raises(AttributeError, match=error_regex):
        getattr(pipe, "fit_predict")
def test_fit_predict_with_intermediate_fit_params():
    """fit_predict must route step-prefixed fit params to each step."""
    pipe = Pipeline([("transf", TransfFitParams()), ("clf", FitParamT())])
    pipe.fit_predict(
        X=None, y=None, transf__should_get_this=True, clf__should_succeed=True
    )
    transf_params = pipe.named_steps["transf"].fit_params
    assert transf_params["should_get_this"]
    assert pipe.named_steps["clf"].successful
    # the classifier's param must not leak into the transformer
    assert "should_succeed" not in transf_params
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver="full")
pipeline = Pipeline([("pca", pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
    """fit_transform falls back to fit().transform() when the transformer
    provides no fit_transform of its own."""
    X, y = load_iris(return_X_y=True)
    transf = Transf()
    pipeline = Pipeline([("mock", transf)])
    via_pipeline = pipeline.fit_transform(X, y)
    via_transformer = transf.fit(X, y).transform(X)
    assert_array_almost_equal(via_pipeline, via_transformer)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([("mock", transf1)])
assert pipeline.named_steps["mock"] is transf1
# Directly setting attr
pipeline.steps = [("mock2", transf2)]
assert "mock" not in pipeline.named_steps
assert pipeline.named_steps["mock2"] is transf2
assert [("mock2", transf2)] == pipeline.steps
# Using set_params
pipeline.set_params(steps=[("mock", transf1)])
assert [("mock", transf1)] == pipeline.steps
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert [("mock", transf2)] == pipeline.steps
# With invalid data
pipeline.set_params(steps=[("junk", ())])
with raises(TypeError):
pipeline.fit([[1]], [1])
with raises(TypeError):
pipeline.fit_transform([[1]], [1])
@pytest.mark.parametrize("passthrough", [None, "passthrough"])
def test_pipeline_correctly_adjusts_steps(passthrough):
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
pipeline = Pipeline(
[("m2", mult2), ("bad", passthrough), ("m3", mult3), ("m5", mult5)]
)
pipeline.fit(X, y)
expected_names = ["m2", "bad", "m3", "m5"]
actual_names = [name for name, _ in pipeline.steps]
assert expected_names == actual_names
@pytest.mark.parametrize("passthrough", [None, "passthrough"])
def test_set_pipeline_step_passthrough(passthrough):
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([("m2", mult2), ("m3", mult3), ("last", mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=passthrough)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
expected_params = {
"steps": pipeline.steps,
"m2": mult2,
"m3": passthrough,
"last": mult5,
"memory": None,
"m2__mult": 2,
"last__mult": 5,
"verbose": False,
}
assert pipeline.get_params(deep=True) == expected_params
pipeline.set_params(m2=passthrough)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = [
"predict_proba",
"predict_log_proba",
"decision_function",
"transform",
"score",
]
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=passthrough)
# mult2 and mult3 are active
exp = 6
pipeline.fit(X, y)
pipeline.transform(X)
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
with raises(AttributeError, match="has no attribute 'predict'"):
getattr(pipeline, "predict")
# Check 'passthrough' step at construction time
exp = 2 * 5
pipeline = Pipeline([("m2", mult2), ("m3", passthrough), ("last", mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert not hasattr(pipeline, "predict")
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline("passthrough")
assert pipeline.steps[0] == ("passthrough", "passthrough")
assert not hasattr(pipeline, "predict")
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert not hasattr(pipeline, "predict")
pipeline.transform
assert not hasattr(pipeline, "inverse_transform")
pipeline = make_pipeline(NoInvTransf(), Transf())
assert not hasattr(pipeline, "predict")
pipeline.transform
assert not hasattr(pipeline, "inverse_transform")
def test_make_pipeline():
    """make_pipeline should auto-generate deduplicated lowercase step names."""
    first, second = Transf(), Transf()
    pipe = make_pipeline(first, second)
    assert isinstance(pipe, Pipeline)
    # duplicate classes get numbered suffixes
    assert [name for name, _ in pipe.steps] == ["transf-1", "transf-2"]
    pipe = make_pipeline(first, second, FitParamT())
    assert isinstance(pipe, Pipeline)
    assert [name for name, _ in pipe.steps] == [
        "transf-1",
        "transf-2",
        "fitparamt",
    ]
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
with raises(AttributeError):
getattr(reg, "classes_")
clf = make_pipeline(
SelectKBest(k=1),
LogisticRegression(solver="lbfgs", multi_class="auto", random_state=0),
)
with raises(AttributeError):
getattr(clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_pipeline_memory_transformer():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
memory = Memory(cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(gamma="scale", probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([("transf", clone(transf)), ("svc", clf)])
cached_pipe = Pipeline([("transf", transf), ("svc", clf)], memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
# Get the time stamp of the tranformer in the cached pipeline
expected_ts = cached_pipe.named_steps["transf"].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe.named_steps["transf"].means_,
)
assert not hasattr(transf, "means_")
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe.named_steps["transf"].means_,
)
assert cached_pipe.named_steps["transf"].timestamp_ == expected_ts
# Create a new pipeline with cloned estimators
# Check that even changing the name step does not affect the cache hit
clf_2 = SVC(gamma="scale", probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline(
[("transf_2", transf_2), ("svc", clf_2)], memory=memory
)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe_2.predict_proba(X))
assert_array_equal(
pipe.predict_log_proba(X), cached_pipe_2.predict_log_proba(X)
)
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe_2.named_steps["transf_2"].means_,
)
assert cached_pipe_2.named_steps["transf_2"].timestamp_ == expected_ts
finally:
shutil.rmtree(cachedir)
def test_pipeline_memory_sampler():
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
cachedir = mkdtemp()
try:
memory = Memory(cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(gamma="scale", probability=True, random_state=0)
transf = DummySampler()
pipe = Pipeline([("transf", clone(transf)), ("svc", clf)])
cached_pipe = Pipeline([("transf", transf), ("svc", clf)], memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
# Get the time stamp of the tranformer in the cached pipeline
expected_ts = cached_pipe.named_steps["transf"].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe.named_steps["transf"].means_,
)
assert not hasattr(transf, "means_")
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X), cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe.named_steps["transf"].means_,
)
assert cached_pipe.named_steps["transf"].timestamp_ == expected_ts
# Create a new pipeline with cloned estimators
# Check that even changing the name step does not affect the cache hit
clf_2 = SVC(gamma="scale", probability=True, random_state=0)
transf_2 = DummySampler()
cached_pipe_2 = Pipeline(
[("transf_2", transf_2), ("svc", clf_2)], memory=memory
)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe_2.predict_proba(X))
assert_array_equal(
pipe.predict_log_proba(X), cached_pipe_2.predict_log_proba(X)
)
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(
pipe.named_steps["transf"].means_,
cached_pipe_2.named_steps["transf_2"].means_,
)
assert cached_pipe_2.named_steps["transf_2"].timestamp_ == expected_ts
finally:
shutil.rmtree(cachedir)
def test_pipeline_methods_pca_rus_svm():
# Test the various methods of the pipeline (pca + svm).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
# Test with PCA + SVC
clf = SVC(gamma="scale", probability=True, random_state=0)
pca = PCA()
rus = RandomUnderSampler(random_state=0)
pipe = Pipeline([("pca", pca), ("rus", rus), ("svc", clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_rus_pca_svm():
# Test the various methods of the pipeline (pca + svm).
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
# Test with PCA + SVC
clf = SVC(gamma="scale", probability=True, random_state=0)
pca = PCA()
rus = RandomUnderSampler(random_state=0)
pipe = Pipeline([("rus", rus), ("pca", pca), ("svc", clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_sample():
# Test whether pipeline works with a sampler at the end.
# Also test pipeline.sampler
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
rus = RandomUnderSampler(random_state=0)
pipeline = Pipeline([("rus", rus)])
# test transform and fit_transform:
X_trans, y_trans = pipeline.fit_resample(X, y)
X_trans2, y_trans2 = rus.fit_resample(X, y)
assert_allclose(X_trans, X_trans2, rtol=R_TOL)
assert_allclose(y_trans, y_trans2, rtol=R_TOL)
pca = PCA()
pipeline = Pipeline([("pca", PCA()), ("rus", rus)])
X_trans, y_trans = pipeline.fit_resample(X, y)
X_pca = pca.fit_transform(X)
X_trans2, y_trans2 = rus.fit_resample(X_pca, y)
# We round the value near to zero. It seems that PCA has some issue
# with that
X_trans[np.bitwise_and(X_trans < R_TOL, X_trans > -R_TOL)] = 0
X_trans2[np.bitwise_and(X_trans2 < R_TOL, X_trans2 > -R_TOL)] = 0
assert_allclose(X_trans, X_trans2, rtol=R_TOL)
assert_allclose(y_trans, y_trans2, rtol=R_TOL)
def test_pipeline_sample_transform():
# Test whether pipeline works with a sampler at the end.
# Also test pipeline.sampler
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
rus = RandomUnderSampler(random_state=0)
pca = PCA()
pca2 = PCA()
pipeline = Pipeline([("pca", pca), ("rus", rus), ("pca2", pca2)])
pipeline.fit(X, y).transform(X)
def test_pipeline_none_classifier():
# Test pipeline using None as preprocessing step and a classifier
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
clf = LogisticRegression(solver="lbfgs", random_state=0)
pipe = make_pipeline(None, clf)
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.decision_function(X)
pipe.score(X, y)
def test_pipeline_none_sampler_classifier():
# Test pipeline using None, RUS and a classifier
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
clf = LogisticRegression(solver="lbfgs", random_state=0)
rus = RandomUnderSampler(random_state=0)
pipe = make_pipeline(None, rus, clf)
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.decision_function(X)
pipe.score(X, y)
def test_pipeline_sampler_none_classifier():
# Test pipeline using RUS, None and a classifier
X, y = make_classification(
n_classes=2,
class_sep=2,
weights=[0.1, 0.9],
n_informative=3,
n_redundant=1,
flip_y=0,
n_features=20,
n_clusters_per_class=1,
n_samples=5000,
random_state=0,
)
clf = LogisticRegression(solver="lbfgs", random_state=0)
rus = RandomUnderSampler(random_state=0)
pipe = make_pipeline(rus, None, clf)
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.decision_function(X)
pipe.score(X, y)
def test_pipeline_none_sampler_sample():
    # Test pipeline using a None step followed by a sampler: the pipeline
    # must expose `fit_resample` when its last step is a sampler.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0,
    )
    rus = RandomUnderSampler(random_state=0)
    pipe = make_pipeline(None, rus)
    pipe.fit_resample(X, y)
def test_pipeline_none_transformer():
    # Test pipeline using None and a transformer that implements transform and
    # inverse_transform: the round-trip through the pipeline must be lossless.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0,
    )
    pca = PCA(whiten=True)
    pipe = make_pipeline(None, pca)
    pipe.fit(X, y)
    X_trans = pipe.transform(X)
    X_inversed = pipe.inverse_transform(X_trans)
    assert_array_almost_equal(X, X_inversed)
def test_pipeline_methods_anova_rus():
    # Test the various methods of the pipeline (anova).
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0,
    )
    # Test with RandomUnderSampling + Anova + LogisticRegression: the full
    # sampler -> feature-selection -> classifier chain must support all
    # prediction methods.
    clf = LogisticRegression(solver="lbfgs")
    rus = RandomUnderSampler(random_state=0)
    filter1 = SelectKBest(f_classif, k=2)
    pipe = Pipeline([("rus", rus), ("anova", filter1), ("logistic", clf)])
    pipe.fit(X, y)
    pipe.predict(X)
    pipe.predict_proba(X)
    pipe.predict_log_proba(X)
    pipe.score(X, y)
def test_pipeline_with_step_that_implements_both_sample_and_transform():
    # A step implementing both `sample` and `transform` is ambiguous and the
    # pipeline is expected to reject it with a TypeError.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0,
    )
    clf = LogisticRegression(solver="lbfgs")
    with raises(TypeError):
        pipeline = Pipeline([("step", FitTransformSample()), ("logistic", clf)])
        pipeline.fit(X, y)
def test_pipeline_with_step_that_it_is_pipeline():
    # Nesting a pipeline that contains a sampler as an intermediate step of
    # another pipeline is expected to be rejected with a TypeError.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=5000,
        random_state=0,
    )
    # Test with RandomUnderSampling + Anova + LogisticRegression
    clf = LogisticRegression(solver="lbfgs")
    rus = RandomUnderSampler(random_state=0)
    filter1 = SelectKBest(f_classif, k=2)
    pipe1 = Pipeline([("rus", rus), ("anova", filter1)])
    with raises(TypeError):
        pipe2 = Pipeline([("pipe1", pipe1), ("logistic", clf)])
        pipe2.fit(X, y)
def test_pipeline_fit_then_sample_with_sampler_last_estimator():
    # `fit_resample` on a freshly-built pipeline and on an already-fitted
    # identical pipeline must yield the same resampled data.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=50000,
        random_state=0,
    )
    rus = RandomUnderSampler(random_state=42)
    enn = ENN()
    pipeline = make_pipeline(rus, enn)
    X_fit_resample_resampled, y_fit_resample_resampled = pipeline.fit_resample(X, y)
    pipeline = make_pipeline(rus, enn)
    pipeline.fit(X, y)
    X_fit_then_sample_res, y_fit_then_sample_res = pipeline.fit_resample(X, y)
    assert_array_equal(X_fit_resample_resampled, X_fit_then_sample_res)
    assert_array_equal(y_fit_resample_resampled, y_fit_then_sample_res)
def test_pipeline_fit_then_sample_3_samplers_with_sampler_last_estimator():
    # Same check as above but with three chained samplers, including a
    # repeated sampler instance.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=50000,
        random_state=0,
    )
    rus = RandomUnderSampler(random_state=42)
    enn = ENN()
    pipeline = make_pipeline(rus, enn, rus)
    X_fit_resample_resampled, y_fit_resample_resampled = pipeline.fit_resample(X, y)
    pipeline = make_pipeline(rus, enn, rus)
    pipeline.fit(X, y)
    X_fit_then_sample_res, y_fit_then_sample_res = pipeline.fit_resample(X, y)
    assert_array_equal(X_fit_resample_resampled, X_fit_then_sample_res)
    assert_array_equal(y_fit_resample_resampled, y_fit_then_sample_res)
def test_make_pipeline_memory():
    # Check that `make_pipeline` forwards the `memory` keyword and defaults
    # to no caching when it is not given.
    cachedir = mkdtemp()
    try:
        memory = Memory(cachedir, verbose=10)
        pipeline = make_pipeline(DummyTransf(), SVC(gamma="scale"), memory=memory)
        assert pipeline.memory is memory
        pipeline = make_pipeline(DummyTransf(), SVC(gamma="scale"))
        assert pipeline.memory is None
    finally:
        # Always remove the temporary cache directory, even on failure.
        shutil.rmtree(cachedir)
def test_predict_with_predict_params():
    # tests that Pipeline passes predict_params to the final estimator
    # when predict is invoked
    pipe = Pipeline([("transf", Transf()), ("clf", DummyEstimatorParams())])
    pipe.fit(None, None)
    pipe.predict(X=None, got_attribute=True)
    # The dummy estimator records the keyword it received during `predict`.
    assert pipe.named_steps["clf"].got_attribute
def test_resampler_last_stage_passthrough():
    # A pipeline ending with a `None` (passthrough) step after a sampler must
    # still expose `fit_resample`.
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.1, 0.9],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=50000,
        random_state=0,
    )
    rus = RandomUnderSampler(random_state=42)
    pipe = make_pipeline(rus, None)
    pipe.fit_resample(X, y)
def test_pipeline_score_samples_pca_lof_binary():
    X, y = make_classification(
        n_classes=2,
        class_sep=2,
        weights=[0.3, 0.7],
        n_informative=3,
        n_redundant=1,
        flip_y=0,
        n_features=20,
        n_clusters_per_class=1,
        n_samples=500,
        random_state=0,
    )
    # Test that the score_samples method is implemented on a pipeline.
    # Test that the score_samples method on pipeline yields same results as
    # applying transform and score_samples steps separately.
    rus = RandomUnderSampler(random_state=42)
    pca = PCA(svd_solver="full", n_components="mle", whiten=True)
    lof = LocalOutlierFactor(novelty=True)
    pipe = Pipeline([("rus", rus), ("pca", pca), ("lof", lof)])
    pipe.fit(X, y)
    # Check the shapes
    assert pipe.score_samples(X).shape == (X.shape[0],)
    # Check the values. The sampler is re-applied manually because samplers
    # only act during `fit`, not during `score_samples`.
    X_res, _ = rus.fit_resample(X, y)
    lof.fit(pca.fit_transform(X_res))
    assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X)))
def test_score_samples_on_pipeline_without_score_samples():
    X = np.array([[1], [2]])
    y = np.array([1, 2])
    # Test that a pipeline does not have score_samples method when the final
    # step of the pipeline does not have score_samples defined.
    pipe = make_pipeline(LogisticRegression())
    pipe.fit(X, y)
    with pytest.raises(
        AttributeError,
        match="'LogisticRegression' object has no attribute 'score_samples'",
    ):
        pipe.score_samples(X)
def test_pipeline_param_error():
    # `sample_weight` cannot be passed directly to `Pipeline.fit`; it has to
    # be routed to a step with the `<step>__sample_weight` syntax.
    clf = make_pipeline(LogisticRegression())
    with pytest.raises(
        ValueError,
        match="Pipeline.fit does not accept the sample_weight parameter",
    ):
        clf.fit([[0], [0]], [0, 1], sample_weight=[1, 1])
# Parametrization grid for `test_verbose`: (estimator, expected stdout regex,
# method name) triples, keeping only the methods that each estimator actually
# implements (and skipping `fit_transform` when the last step is FitParamT).
parameter_grid_test_verbose = (
    (est, pattern, method)
    for (est, pattern), method in itertools.product(
        [
            (
                Pipeline([("transf", Transf()), ("clf", FitParamT())]),
                r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
                r"\[Pipeline\].*\(step 2 of 2\) Processing clf.* total=.*\n$",
            ),
            (
                Pipeline([("transf", Transf()), ("noop", None), ("clf", FitParamT())]),
                r"\[Pipeline\].*\(step 1 of 3\) Processing transf.* total=.*\n"
                r"\[Pipeline\].*\(step 2 of 3\) Processing noop.* total=.*\n"
                r"\[Pipeline\].*\(step 3 of 3\) Processing clf.* total=.*\n$",
            ),
            (
                Pipeline(
                    [
                        ("transf", Transf()),
                        ("noop", "passthrough"),
                        ("clf", FitParamT()),
                    ]
                ),
                r"\[Pipeline\].*\(step 1 of 3\) Processing transf.* total=.*\n"
                r"\[Pipeline\].*\(step 2 of 3\) Processing noop.* total=.*\n"
                r"\[Pipeline\].*\(step 3 of 3\) Processing clf.* total=.*\n$",
            ),
            (
                Pipeline([("transf", Transf()), ("clf", None)]),
                r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
                r"\[Pipeline\].*\(step 2 of 2\) Processing clf.* total=.*\n$",
            ),
            (
                Pipeline([("transf", None), ("mult", Mult())]),
                r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
                r"\[Pipeline\].*\(step 2 of 2\) Processing mult.* total=.*\n$",
            ),
            (
                Pipeline([("transf", "passthrough"), ("mult", Mult())]),
                r"\[Pipeline\].*\(step 1 of 2\) Processing transf.* total=.*\n"
                r"\[Pipeline\].*\(step 2 of 2\) Processing mult.* total=.*\n$",
            ),
            (
                FeatureUnion([("mult1", Mult()), ("mult2", Mult())]),
                r"\[FeatureUnion\].*\(step 1 of 2\) Processing mult1.* total=.*\n"
                r"\[FeatureUnion\].*\(step 2 of 2\) Processing mult2.* total=.*\n$",
            ),
            (
                FeatureUnion([("mult1", "drop"), ("mult2", Mult()), ("mult3", "drop")]),
                r"\[FeatureUnion\].*\(step 1 of 1\) Processing mult2.* total=.*\n$",
            ),
        ],
        ["fit", "fit_transform", "fit_predict"],
    )
    if hasattr(est, method)
    and not (
        method == "fit_transform"
        and hasattr(est, "steps")
        and isinstance(est.steps[-1][1], FitParamT)
    )
)
@pytest.mark.parametrize("est, pattern, method", parameter_grid_test_verbose)
def test_verbose(est, method, pattern, capsys):
    # NOTE: pytest binds parametrized arguments by name, so the different
    # ordering between the decorator string and the signature is harmless.
    func = getattr(est, method)
    X = [[1, 2, 3], [4, 5, 6]]
    y = [[7], [8]]
    # No output is expected when verbose is off...
    est.set_params(verbose=False)
    func(X, y)
    assert not capsys.readouterr().out, "Got output for verbose=False"
    # ...and the per-step progress report must match when verbose is on.
    est.set_params(verbose=True)
    func(X, y)
    assert re.match(pattern, capsys.readouterr().out)
def test_pipeline_score_samples_pca_lof_multiclass():
    X, y = load_iris(return_X_y=True)
    sampling_strategy = {0: 50, 1: 30, 2: 20}
    X, y = make_imbalance(X, y, sampling_strategy=sampling_strategy)
    # Test that the score_samples method is implemented on a pipeline.
    # Test that the score_samples method on pipeline yields same results as
    # applying transform and score_samples steps separately.
    rus = RandomUnderSampler()
    pca = PCA(svd_solver="full", n_components="mle", whiten=True)
    lof = LocalOutlierFactor(novelty=True)
    pipe = Pipeline([("rus", rus), ("pca", pca), ("lof", lof)])
    pipe.fit(X, y)
    # Check the shapes
    assert pipe.score_samples(X).shape == (X.shape[0],)
    # Check the values. NOTE(review): `pca`/`lof` are the very objects used as
    # pipeline steps, so refitting them here also updates the pipeline —
    # presumably intentional since `rus` is not seeded; confirm if modified.
    lof.fit(pca.fit_transform(X))
    assert_allclose(pipe.score_samples(X), lof.score_samples(pca.transform(X)))
def test_pipeline_param_validation():
    # Run the common constructor-parameter validation checks on a pipeline
    # mixing a sampler and a classifier.
    model = Pipeline(
        [("sampler", RandomUnderSampler()), ("classifier", LogisticRegression())]
    )
    check_param_validation("Pipeline", model)
@pytest.mark.skipif(
    sklearn_version < parse_version("1.2"), reason="requires scikit-learn >= 1.2"
)
def test_pipeline_with_set_output():
    # Check that `set_output` controls the container type produced by the
    # transformer steps of a pipeline containing a sampler.
    pd = pytest.importorskip("pandas")
    X, y = load_iris(return_X_y=True, as_frame=True)
    pipeline = make_pipeline(
        StandardScaler(), RandomUnderSampler(), LogisticRegression()
    ).set_output(transform="default")
    pipeline.fit(X, y)
    X_res, y_res = pipeline[:-1].fit_resample(X, y)
    assert isinstance(X_res, np.ndarray)
    # transformer will not change `y` and sampler will always preserve the type of `y`
    assert isinstance(y_res, type(y))
    pipeline.set_output(transform="pandas")
    X_res, y_res = pipeline[:-1].fit_resample(X, y)
    assert isinstance(X_res, pd.DataFrame)
    # transformer will not change `y` and sampler will always preserve the type of `y`
    assert isinstance(y_res, type(y))
| 44,011 | 31.290536 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/test_common.py | """Common tests"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import warnings
from collections import OrderedDict
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import SkipTest, ignore_warnings, set_random_state
from sklearn.utils.estimator_checks import _construct_instance, _get_check_estimator_ids
from sklearn.utils.estimator_checks import (
parametrize_with_checks as parametrize_with_checks_sklearn,
)
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import NearMiss, RandomUnderSampler
from imblearn.utils.estimator_checks import (
_set_checking_parameters,
check_dataframe_column_names_consistency,
check_param_validation,
parametrize_with_checks,
)
from imblearn.utils.testing import all_estimators
@pytest.mark.parametrize("name, Estimator", all_estimators())
def test_all_estimator_no_base_class(name, Estimator):
    # Check that `all_estimators` does not return abstract/base classes.
    msg = f"Base estimators such as {name} should not be included" f" in all_estimators"
    assert not name.lower().startswith("base"), msg
def _tested_estimators():
    # Yield one seeded instance of every public estimator; NearMiss is
    # yielded once per `version` because the three algorithms differ.
    for name, Estimator in all_estimators():
        try:
            estimator = _construct_instance(Estimator)
            set_random_state(estimator)
        except SkipTest:
            # Estimators that cannot be constructed generically are skipped.
            continue
        if isinstance(estimator, NearMiss):
            # For NearMiss, let's check the three algorithms
            for version in (1, 2, 3):
                yield clone(estimator).set_params(version=version)
        else:
            yield estimator
@parametrize_with_checks_sklearn(list(_tested_estimators()))
def test_estimators_compatibility_sklearn(estimator, check, request):
    # Run scikit-learn's common estimator checks against every estimator.
    _set_checking_parameters(estimator)
    check(estimator)
@parametrize_with_checks(list(_tested_estimators()))
def test_estimators_imblearn(estimator, check, request):
    # Common tests for estimator instances, run with the imbalanced-learn
    # specific check suite. Warnings emitted by the checks are silenced so
    # that only actual failures surface.
    # NOTE: the original tuple listed ``FutureWarning`` twice; the duplicate
    # entry was redundant and has been removed.
    with ignore_warnings(
        category=(
            FutureWarning,
            ConvergenceWarning,
            UserWarning,
        )
    ):
        _set_checking_parameters(estimator)
        check(estimator)
@pytest.mark.parametrize(
    "estimator", _tested_estimators(), ids=_get_check_estimator_ids
)
def test_check_param_validation(estimator):
    # Check that constructor parameters of every estimator are validated with
    # informative error messages.
    name = estimator.__class__.__name__
    _set_checking_parameters(estimator)
    check_param_validation(name, estimator)
@pytest.mark.parametrize("Sampler", [RandomOverSampler, RandomUnderSampler])
def test_strategy_as_ordered_dict(Sampler):
    """Check that it is possible to pass an `OrderedDict` as strategy."""
    rng = np.random.RandomState(42)
    X = rng.randn(30, 2)
    y = np.array([0] * 10 + [1] * 20)
    sampler = Sampler(random_state=42)
    # Over-sampling grows the minority class up to 20 samples per class;
    # under-sampling shrinks the majority class down to 10 per class.
    per_class = 20 if isinstance(sampler, RandomOverSampler) else 10
    strategy = OrderedDict({0: per_class, 1: per_class})
    sampler.set_params(sampling_strategy=strategy)
    X_res, y_res = sampler.fit_resample(X, y)
    expected_n_samples = sum(strategy.values())
    assert X_res.shape[0] == expected_n_samples
    assert y_res.shape[0] == expected_n_samples
@pytest.mark.parametrize(
    "estimator", _tested_estimators(), ids=_get_check_estimator_ids
)
def test_pandas_column_name_consistency(estimator):
    # Estimators fitted on a DataFrame must not emit the "fitted without
    # feature names" warning when used on data with consistent column names.
    _set_checking_parameters(estimator)
    with ignore_warnings(category=(FutureWarning)):
        with warnings.catch_warnings(record=True) as record:
            check_dataframe_column_names_consistency(
                estimator.__class__.__name__, estimator
            )
        for warning in record:
            assert "was fitted without feature names" not in str(warning.message)
| 3,795 | 33.198198 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/test_docstring_parameters.py | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
import importlib
import inspect
import warnings
from inspect import signature
from pkgutil import walk_packages
import pytest
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import (
_get_func_name,
check_docstring_parameters,
ignore_warnings,
)
from sklearn.utils.estimator_checks import _enforce_estimator_tags_y
try:
from sklearn.utils.estimator_checks import _enforce_estimator_tags_x
except ImportError:
# scikit-learn >= 1.2
from sklearn.utils.estimator_checks import (
_enforce_estimator_tags_X as _enforce_estimator_tags_x,
)
from sklearn.utils.deprecation import _is_deprecated
from sklearn.utils.estimator_checks import _construct_instance
import imblearn
from imblearn.base import is_sampler
from imblearn.utils.estimator_checks import _set_checking_parameters
from imblearn.utils.testing import all_estimators
# Module-level constants shared by the docstring checks below.
# walk_packages() ignores DeprecationWarnings, now we need to ignore
# FutureWarnings
with warnings.catch_warnings():
    warnings.simplefilter("ignore", FutureWarning)
    # mypy error: Module has no attribute "__path__"
    imblearn_path = imblearn.__path__ # type: ignore # mypy issue #1422
    PUBLIC_MODULES = set(
        [
            pckg[1]
            for pckg in walk_packages(prefix="imblearn.", path=imblearn_path)
            if not ("._" in pckg[1] or ".tests." in pckg[1])
        ]
    )
# functions to ignore args / docstring of
_DOCSTRING_IGNORES = [
    "RUSBoostClassifier", # TODO remove after releasing scikit-learn 1.0.1
    "ValueDifferenceMetric",
]
# Methods where y param should be ignored if y=None by default
_METHODS_IGNORE_NONE_Y = [
    "fit",
    "score",
    "fit_predict",
    "fit_transform",
    "partial_fit",
    "predict",
]
# numpydoc 0.8.0's docscrape tool raises because of collections.abc under
# Python 3.7
@pytest.mark.filterwarnings("ignore::FutureWarning")
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
@pytest.mark.skipif(IS_PYPY, reason="test segfaults on PyPy")
def test_docstring_parameters():
    # Check that the documented parameters of every public class and function
    # match their actual signatures.
    # Test module docstring formatting
    # Skip test if numpydoc is not found
    pytest.importorskip(
        "numpydoc", reason="numpydoc is required to test the docstrings"
    )
    # XXX unreached code as of v0.22
    from numpydoc import docscrape
    incorrect = []
    for name in PUBLIC_MODULES:
        if name.endswith(".conftest"):
            # pytest tooling, not part of the scikit-learn API
            continue
        with warnings.catch_warnings(record=True):
            module = importlib.import_module(name)
        classes = inspect.getmembers(module, inspect.isclass)
        # Exclude non-scikit-learn classes
        classes = [cls for cls in classes if cls[1].__module__.startswith("imblearn")]
        for cname, cls in classes:
            this_incorrect = []
            if cname in _DOCSTRING_IGNORES or cname.startswith("_"):
                continue
            if inspect.isabstract(cls):
                continue
            with warnings.catch_warnings(record=True) as w:
                cdoc = docscrape.ClassDoc(cls)
                if len(w):
                    raise RuntimeError(
                        "Error for __init__ of %s in %s:\n%s" % (cls, name, w[0])
                    )
            cls_init = getattr(cls, "__init__", None)
            if _is_deprecated(cls_init):
                continue
            elif cls_init is not None:
                this_incorrect += check_docstring_parameters(cls.__init__, cdoc)
            for method_name in cdoc.methods:
                method = getattr(cls, method_name)
                if _is_deprecated(method):
                    continue
                param_ignore = None
                # Now skip docstring test for y when y is None
                # by default for API reason
                if method_name in _METHODS_IGNORE_NONE_Y:
                    sig = signature(method)
                    if "y" in sig.parameters and sig.parameters["y"].default is None:
                        param_ignore = ["y"] # ignore y for fit and score
                result = check_docstring_parameters(method, ignore=param_ignore)
                this_incorrect += result
            incorrect += this_incorrect
        functions = inspect.getmembers(module, inspect.isfunction)
        # Exclude imported functions
        functions = [fn for fn in functions if fn[1].__module__ == name]
        for fname, func in functions:
            # Don't test private methods / functions
            if fname.startswith("_"):
                continue
            if fname == "configuration" and name.endswith("setup"):
                continue
            name_ = _get_func_name(func)
            if not any(d in name_ for d in _DOCSTRING_IGNORES) and not _is_deprecated(
                func
            ):
                incorrect += check_docstring_parameters(func)
    msg = "\n".join(incorrect)
    if len(incorrect) > 0:
        raise AssertionError("Docstring Error:\n" + msg)
@ignore_warnings(category=FutureWarning)
def test_tabs():
    """Check that no source file in the package contains tab characters."""
    for importer, modname, ispkg in walk_packages(
        imblearn.__path__, prefix="imblearn."
    ):
        if IS_PYPY:
            continue
        # because we don't import
        mod = importlib.import_module(modname)
        try:
            source = inspect.getsource(mod)
        except IOError: # user probably should have run "make clean"
            continue
        # The previous failure message was a tuple whose second element
        # applied ``%`` to a string without a placeholder, so a failing
        # assertion raised ``TypeError: not all arguments converted`` instead
        # of the intended message. Build one well-formed message instead.
        assert "\t" not in source, (
            f'"{modname}" has tabs, please remove them or add it to the ignore list'
        )
def _construct_compose_pipeline_instance(Estimator):
    """Build a degenerate composition-estimator instance.

    Only useful to test the docstrings; any class other than ``Pipeline``
    yields ``None``.
    """
    if Estimator.__name__ != "Pipeline":
        return None
    return Estimator(steps=[("clf", LogisticRegression())])
@pytest.mark.parametrize("name, Estimator", all_estimators())
def test_fit_docstring_attributes(name, Estimator):
    # Check that every documented attribute exists after fitting and,
    # conversely, that every fitted attribute is documented.
    pytest.importorskip("numpydoc")
    from numpydoc import docscrape
    if Estimator.__name__ in _DOCSTRING_IGNORES:
        return
    doc = docscrape.ClassDoc(Estimator)
    attributes = doc["Attributes"]
    if Estimator.__name__ == "Pipeline":
        est = _construct_compose_pipeline_instance(Estimator)
    else:
        est = _construct_instance(Estimator)
    _set_checking_parameters(est)
    X, y = make_classification(
        n_samples=20,
        n_features=3,
        n_redundant=0,
        n_classes=2,
        random_state=2,
    )
    y = _enforce_estimator_tags_y(est, y)
    X = _enforce_estimator_tags_x(est, X)
    if "oob_score" in est.get_params():
        est.set_params(bootstrap=True, oob_score=True)
    # Samplers are fitted through `fit_resample`, the rest through `fit`.
    if is_sampler(est):
        est.fit_resample(X, y)
    else:
        est.fit(X, y)
    skipped_attributes = set([])
    for attr in attributes:
        if attr.name in skipped_attributes:
            continue
        desc = " ".join(attr.desc).lower()
        # As certain attributes are present "only" if a certain parameter is
        # provided, this checks if the word "only" is present in the attribute
        # description, and if not the attribute is required to be present.
        if "only " in desc:
            continue
        # ignore deprecation warnings
        with ignore_warnings(category=FutureWarning):
            assert hasattr(est, attr.name)
    fit_attr = _get_all_fitted_attributes(est)
    fit_attr_names = [attr.name for attr in attributes]
    undocumented_attrs = set(fit_attr).difference(fit_attr_names)
    undocumented_attrs = set(undocumented_attrs).difference(skipped_attributes)
    if undocumented_attrs:
        raise AssertionError(
            f"Undocumented attributes for {Estimator.__name__}: {undocumented_attrs}"
        )
def _get_all_fitted_attributes(estimator):
"Get all the fitted attributes of an estimator including properties"
# attributes
fit_attr = list(estimator.__dict__.keys())
# properties
with warnings.catch_warnings():
warnings.filterwarnings("error", category=FutureWarning)
for name in dir(estimator.__class__):
obj = getattr(estimator.__class__, name)
if not isinstance(obj, property):
continue
# ignore properties that raises an AttributeError and deprecated
# properties
try:
getattr(estimator, name)
except (AttributeError, FutureWarning):
continue
fit_attr.append(name)
return [k for k in fit_attr if k.endswith("_") and not k.startswith("_")]
| 8,863 | 32.703422 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/test_public_functions.py | """This is a copy of sklearn/tests/test_public_functions.py. It can be
removed when we support scikit-learn >= 1.2.
"""
from importlib import import_module
from inspect import signature
import pytest
from imblearn.utils._param_validation import (
generate_invalid_param_val,
generate_valid_param,
make_constraint,
)
# Fully-qualified names of the public functions whose parameter validation is
# exercised by `test_function_param_validation` below.
PARAM_VALIDATION_FUNCTION_LIST = [
    "imblearn.datasets.fetch_datasets",
    "imblearn.datasets.make_imbalance",
    "imblearn.metrics.classification_report_imbalanced",
    "imblearn.metrics.geometric_mean_score",
    "imblearn.metrics.macro_averaged_mean_absolute_error",
    "imblearn.metrics.make_index_balanced_accuracy",
    "imblearn.metrics.sensitivity_specificity_support",
    "imblearn.metrics.sensitivity_score",
    "imblearn.metrics.specificity_score",
    "imblearn.pipeline.make_pipeline",
]
@pytest.mark.parametrize("func_module", PARAM_VALIDATION_FUNCTION_LIST)
def test_function_param_validation(func_module):
    """Check that an informative error is raised when the value of a parameter does not
    have an appropriate type or value.
    """
    # NOTE: this function mirrors sklearn/tests/test_public_functions.py; keep
    # it in sync with upstream rather than refactoring it locally.
    module_name, func_name = func_module.rsplit(".", 1)
    module = import_module(module_name)
    func = getattr(module, func_name)
    func_sig = signature(func)
    func_params = [
        p.name
        for p in func_sig.parameters.values()
        if p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
    ]
    parameter_constraints = getattr(func, "_skl_parameter_constraints")
    # Generate valid values for the required parameters
    # The parameters `*args` and `**kwargs` are ignored since we cannot generate
    # constraints.
    required_params = [
        p.name
        for p in func_sig.parameters.values()
        if p.default is p.empty and p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
    ]
    valid_required_params = {}
    for param_name in required_params:
        if parameter_constraints[param_name] == "no_validation":
            valid_required_params[param_name] = 1
        else:
            valid_required_params[param_name] = generate_valid_param(
                make_constraint(parameter_constraints[param_name][0])
            )
    # check that there is a constraint for each parameter
    if func_params:
        validation_params = parameter_constraints.keys()
        unexpected_params = set(validation_params) - set(func_params)
        missing_params = set(func_params) - set(validation_params)
        err_msg = (
            "Mismatch between _parameter_constraints and the parameters of"
            f" {func_name}.\nConsider the unexpected parameters {unexpected_params} and"
            f" expected but missing parameters {missing_params}\n"
        )
        assert set(validation_params) == set(func_params), err_msg
    # this object does not have a valid type for sure for all params
    param_with_bad_type = type("BadType", (), {})()
    for param_name in func_params:
        constraints = parameter_constraints[param_name]
        if constraints == "no_validation":
            # This parameter is not validated
            continue
        match = (
            rf"The '{param_name}' parameter of {func_name} must be .* Got .* instead."
        )
        # First, check that the error is raised if param doesn't match any valid type.
        with pytest.raises(ValueError, match=match):
            func(**{**valid_required_params, param_name: param_with_bad_type})
        # Then, for constraints that are more than a type constraint, check that the
        # error is raised if param does match a valid type but does not match any valid
        # value for this type.
        constraints = [make_constraint(constraint) for constraint in constraints]
        for constraint in constraints:
            try:
                bad_value = generate_invalid_param_val(constraint)
            except NotImplementedError:
                continue
            with pytest.raises(ValueError, match=match):
                func(**{**valid_required_params, param_name: bad_value})
| 4,038 | 37.103774 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/test_base.py | """Test for miscellaneous samplers objects."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numpy as np
import pytest
from scipy import sparse
from sklearn.datasets import load_iris, make_regression
from sklearn.linear_model import LinearRegression
from sklearn.utils import _safe_indexing
from sklearn.utils._testing import assert_allclose_dense_sparse, assert_array_equal
from sklearn.utils.multiclass import type_of_target
from imblearn import FunctionSampler
from imblearn.datasets import make_imbalance
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import RandomUnderSampler
# Shared imbalanced iris fixture used by the FunctionSampler tests below.
iris = load_iris()
X, y = make_imbalance(
    iris.data, iris.target, sampling_strategy={0: 10, 1: 25}, random_state=0
)
def test_function_sampler_reject_sparse():
    # With `accept_sparse=False`, passing a sparse matrix must raise the
    # standard scikit-learn validation error.
    X_sparse = sparse.csr_matrix(X)
    sampler = FunctionSampler(accept_sparse=False)
    with pytest.raises(
        TypeError,
        match="A sparse matrix was passed, but dense data is required",
    ):
        sampler.fit_resample(X_sparse, y)
@pytest.mark.parametrize(
    "X, y", [(X, y), (sparse.csr_matrix(X), y), (sparse.csc_matrix(X), y)]
)
def test_function_sampler_identity(X, y):
    # A FunctionSampler without `func` must behave as the identity, for both
    # dense and sparse inputs.
    sampler = FunctionSampler()
    X_res, y_res = sampler.fit_resample(X, y)
    assert_allclose_dense_sparse(X_res, X)
    assert_array_equal(y_res, y)
@pytest.mark.parametrize(
    "X, y", [(X, y), (sparse.csr_matrix(X), y), (sparse.csc_matrix(X), y)]
)
def test_function_sampler_func(X, y):
    # A user-provided `func` drives the resampling; here it keeps the first
    # ten samples.
    def func(X, y):
        return X[:10], y[:10]
    sampler = FunctionSampler(func=func)
    X_res, y_res = sampler.fit_resample(X, y)
    assert_allclose_dense_sparse(X_res, X[:10])
    assert_array_equal(y_res, y[:10])
@pytest.mark.parametrize(
    "X, y", [(X, y), (sparse.csr_matrix(X), y), (sparse.csc_matrix(X), y)]
)
def test_function_sampler_func_kwargs(X, y):
    # `kw_args` must be forwarded to `func`; delegating to a
    # RandomUnderSampler must give the same result as calling it directly.
    def func(X, y, sampling_strategy, random_state):
        rus = RandomUnderSampler(
            sampling_strategy=sampling_strategy, random_state=random_state
        )
        return rus.fit_resample(X, y)
    sampler = FunctionSampler(
        func=func, kw_args={"sampling_strategy": "auto", "random_state": 0}
    )
    X_res, y_res = sampler.fit_resample(X, y)
    X_res_2, y_res_2 = RandomUnderSampler(random_state=0).fit_resample(X, y)
    assert_allclose_dense_sparse(X_res, X_res_2)
    assert_array_equal(y_res, y_res_2)
def test_function_sampler_validate():
    # Check that turning off validation lets a continuous (regression) target
    # pass through the sampler untouched.
    X, y = make_regression()
    def dummy_sampler(X, y):
        indices = np.random.choice(np.arange(X.shape[0]), size=100)
        return _safe_indexing(X, indices), _safe_indexing(y, indices)
    sampler = FunctionSampler(func=dummy_sampler, validate=False)
    pipeline = make_pipeline(sampler, LinearRegression())
    y_pred = pipeline.fit(X, y).predict(X)
    assert type_of_target(y_pred) == "continuous"
def test_function_resampler_fit():
    # Check that the validation is bypass when calling `fit`
    # Non-regression test for:
    # https://github.com/scikit-learn-contrib/imbalanced-learn/issues/782
    # NaN/inf values would be rejected by validation; with `validate=False`
    # both `fit` and `fit_resample` must accept them.
    X = np.array([[1, np.nan], [2, 3], [np.inf, 4]])
    y = np.array([0, 1, 1])
    def func(X, y):
        return X[:1], y[:1]
    sampler = FunctionSampler(func=func, validate=False)
    sampler.fit(X, y)
    sampler.fit_resample(X, y)
| 3,407 | 30.850467 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tests/test_exceptions.py | """Test for the exceptions modules"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from pytest import raises
from imblearn.exceptions import raise_isinstance_error
def test_raise_isinstance_error():
    """Check that a ValueError mentioning the accepted types is raised."""
    wrong_type_value = 10.0
    with raises(ValueError, match="has to be one of"):
        raise_isinstance_error("var", [int], wrong_type_value)
| 375 | 24.066667 | 54 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/base.py | """
Base class for the over-sampling method.
"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
from collections.abc import Mapping
from ..base import BaseSampler
from ..utils._param_validation import Interval, StrOptions
class BaseOverSampler(BaseSampler):
    """Base class for over-sampling algorithms.
    Warning: This class should not be used directly. Use the derive classes
    instead.
    """
    # Tag identifying the resampling family of this sampler hierarchy.
    _sampling_type = "over-sampling"
    # Docstring template substituted into the concrete samplers' docstrings
    # (runtime string: do not edit lightly).
    _sampling_strategy_docstring = """sampling_strategy : float, str, dict or callable, default='auto'
        Sampling information to resample the data set.
        - When ``float``, it corresponds to the desired ratio of the number of
          samples in the minority class over the number of samples in the
          majority class after resampling. Therefore, the ratio is expressed as
          :math:`\\alpha_{os} = N_{rm} / N_{M}` where :math:`N_{rm}` is the
          number of samples in the minority class after resampling and
          :math:`N_{M}` is the number of samples in the majority class.
            .. warning::
               ``float`` is only available for **binary** classification. An
               error is raised for multi-class classification.
        - When ``str``, specify the class targeted by the resampling. The
          number of samples in the different classes will be equalized.
          Possible choices are:
            ``'minority'``: resample only the minority class;
            ``'not minority'``: resample all classes but the minority class;
            ``'not majority'``: resample all classes but the majority class;
            ``'all'``: resample all classes;
            ``'auto'``: equivalent to ``'not majority'``.
        - When ``dict``, the keys correspond to the targeted classes. The
          values correspond to the desired number of samples for each targeted
          class.
        - When callable, function taking ``y`` and returns a ``dict``. The keys
          correspond to the targeted classes. The values correspond to the
          desired number of samples for each class.
        """.strip() # noqa: E501
    # Constraints shared by every over-sampler, consumed by the
    # scikit-learn-style parameter-validation machinery.
    _parameter_constraints: dict = {
        "sampling_strategy": [
            Interval(numbers.Real, 0, 1, closed="right"),
            StrOptions({"auto", "minority", "not minority", "not majority", "all"}),
            Mapping,
            callable,
        ],
        "random_state": ["random_state"],
    }
| 2,519 | 35 | 102 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_random_over_sampler.py | """Class to perform random over-sampling."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections.abc import Mapping
from numbers import Real
import numpy as np
from scipy import sparse
from sklearn.utils import _safe_indexing, check_array, check_random_state
from sklearn.utils.sparsefuncs import mean_variance_axis
from ..utils import Substitution, check_target_type
from ..utils._docstring import _random_state_docstring
from ..utils._param_validation import Interval
from ..utils._validation import _check_X
from .base import BaseOverSampler
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    random_state=_random_state_docstring,
)
class RandomOverSampler(BaseOverSampler):
    """Class to perform random over-sampling.
    Object to over-sample the minority class(es) by picking samples at random
    with replacement. The bootstrap can be generated in a smoothed manner.
    Read more in the :ref:`User Guide <random_over_sampler>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    shrinkage : float or dict, default=None
        Parameter controlling the shrinkage applied to the covariance matrix
        when a smoothed bootstrap is generated. The options are:
        - if `None`, a normal bootstrap will be generated without perturbation.
          It is equivalent to `shrinkage=0` as well;
        - if a `float` is given, the shrinkage factor will be used for all
          classes to generate the smoothed bootstrap;
        - if a `dict` is given, the shrinkage factor will be specific for each
          class. The key corresponds to the targeted class and the value is
          the shrinkage factor.
        The value of the shrinkage parameter needs to be higher than or equal
        to 0.
        .. versionadded:: 0.8
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    sample_indices_ : ndarray of shape (n_new_samples,)
        Indices of the samples selected.
        .. versionadded:: 0.4
    shrinkage_ : dict or None
        The per-class shrinkage factor used to generate the smoothed bootstrap
        sample. When `shrinkage=None` a normal bootstrap will be generated.
        .. versionadded:: 0.8
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    Supports multi-class resampling by sampling each class independently.
    Supports heterogeneous data as object array containing string and numeric
    data.
    When generating a smoothed bootstrap, this method is also known as Random
    Over-Sampling Examples (ROSE) [1]_.
    .. warning::
       Since smoothed bootstrap are generated by adding a small perturbation
       to the drawn samples, this method is not adequate when working with
       sparse matrices.
    References
    ----------
    .. [1] G Menardi, N. Torelli, "Training and assessing classification
       rules with imbalanced data," Data Mining and Knowledge
       Discovery, 28(1), pp.92-122, 2014.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import RandomOverSampler
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> ros = RandomOverSampler(random_state=42)
    >>> X_res, y_res = ros.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    # Constraints consumed by the scikit-learn-style parameter validation.
    _parameter_constraints: dict = {
        **BaseOverSampler._parameter_constraints,
        "shrinkage": [Interval(Real, 0, None, closed="left"), dict, None],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        shrinkage=None,
    ):
        super().__init__(sampling_strategy=sampling_strategy)
        self.random_state = random_state
        self.shrinkage = shrinkage
    def _check_X_y(self, X, y):
        # Validate the target only; NOTE(review): `_check_X` appears to leave
        # X's dtype untouched so heterogeneous/NaN data survive (the `_more_tags`
        # below declares "string" and `allow_nan`) — confirm against `_check_X`.
        y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
        X = _check_X(X)
        self._check_n_features(X, reset=True)
        self._check_feature_names(X, reset=True)
        return X, y, binarize_y
    def _fit_resample(self, X, y):
        random_state = check_random_state(self.random_state)
        # Normalize `shrinkage` into a per-class mapping, or None meaning a
        # plain bootstrap without perturbation.
        if isinstance(self.shrinkage, Real):
            self.shrinkage_ = {
                klass: self.shrinkage for klass in self.sampling_strategy_
            }
        elif self.shrinkage is None or isinstance(self.shrinkage, Mapping):
            self.shrinkage_ = self.shrinkage
        if self.shrinkage_ is not None:
            # Every class that will be resampled needs a shrinkage factor.
            missing_shrinkage_keys = (
                self.sampling_strategy_.keys() - self.shrinkage_.keys()
            )
            if missing_shrinkage_keys:
                raise ValueError(
                    f"`shrinkage` should contain a shrinkage factor for "
                    f"each class that will be resampled. The missing "
                    f"classes are: {repr(missing_shrinkage_keys)}"
                )
            for klass, shrink_factor in self.shrinkage_.items():
                if shrink_factor < 0:
                    raise ValueError(
                        f"The shrinkage factor needs to be >= 0. "
                        f"Got {shrink_factor} for class {klass}."
                    )
            # smoothed bootstrap imposes to make numerical operation; we need
            # to be sure to have only numerical data in X
            try:
                X = check_array(X, accept_sparse=["csr", "csc"], dtype="numeric")
            except ValueError as exc:
                raise ValueError(
                    "When shrinkage is not None, X needs to contain only "
                    "numerical data to later generate a smoothed bootstrap "
                    "sample."
                ) from exc
        X_resampled = [X.copy()]
        y_resampled = [y.copy()]
        # Original samples come first; bootstrap indices are appended per class.
        sample_indices = range(X.shape[0])
        for class_sample, num_samples in self.sampling_strategy_.items():
            # Draw `num_samples` indices with replacement from the target class.
            target_class_indices = np.flatnonzero(y == class_sample)
            bootstrap_indices = random_state.choice(
                target_class_indices,
                size=num_samples,
                replace=True,
            )
            sample_indices = np.append(sample_indices, bootstrap_indices)
            if self.shrinkage_ is not None:
                # generate a smoothed bootstrap with a perturbation
                n_samples, n_features = X.shape
                # Rule-of-thumb smoothing constant (cf. ROSE [1]); note it is
                # computed from the full dataset size, not the class size.
                smoothing_constant = (4 / ((n_features + 2) * n_samples)) ** (
                    1 / (n_features + 4)
                )
                if sparse.issparse(X):
                    _, X_class_variance = mean_variance_axis(
                        X[target_class_indices, :],
                        axis=0,
                    )
                    # in-place sqrt to avoid an extra allocation
                    X_class_scale = np.sqrt(X_class_variance, out=X_class_variance)
                else:
                    X_class_scale = np.std(X[target_class_indices, :], axis=0)
                smoothing_matrix = np.diagflat(
                    self.shrinkage_[class_sample] * smoothing_constant * X_class_scale
                )
                # Gaussian noise scaled per feature, added to the drawn samples.
                X_new = random_state.randn(num_samples, n_features)
                X_new = X_new.dot(smoothing_matrix) + X[bootstrap_indices, :]
                if sparse.issparse(X):
                    X_new = sparse.csr_matrix(X_new, dtype=X.dtype)
                X_resampled.append(X_new)
            else:
                # generate a bootstrap
                X_resampled.append(_safe_indexing(X, bootstrap_indices))
            y_resampled.append(_safe_indexing(y, bootstrap_indices))
        self.sample_indices_ = np.array(sample_indices)
        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)
        return X_resampled, y_resampled
    def _more_tags(self):
        return {
            "X_types": ["2darray", "string", "sparse", "dataframe"],
            "sample_indices": True,
            "allow_nan": True,
            "_xfail_checks": {
                "check_complex_data": "Robust to this type of data.",
            },
        }
| 9,552 | 35.601533 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/__init__.py | """
The :mod:`imblearn.over_sampling` provides a set of method to
perform over-sampling.
"""
from ._adasyn import ADASYN
from ._random_over_sampler import RandomOverSampler
from ._smote import SMOTE, SMOTEN, SMOTENC, SVMSMOTE, BorderlineSMOTE, KMeansSMOTE
# Explicit public API of the :mod:`imblearn.over_sampling` package.
__all__ = [
    "ADASYN",
    "RandomOverSampler",
    "KMeansSMOTE",
    "SMOTE",
    "BorderlineSMOTE",
    "SVMSMOTE",
    "SMOTENC",
    "SMOTEN",
]
| 411 | 19.6 | 82 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_adasyn.py | """Class to perform over-sampling using ADASYN."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numbers
import warnings
import numpy as np
from scipy import sparse
from sklearn.utils import _safe_indexing, check_random_state
from ..utils import Substitution, check_neighbors_object
from ..utils._docstring import _n_jobs_docstring, _random_state_docstring
from ..utils._param_validation import HasMethods, Interval
from .base import BaseOverSampler
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class ADASYN(BaseOverSampler):
    """Oversample using Adaptive Synthetic (ADASYN) algorithm.
    This method is similar to SMOTE but it generates different number of
    samples depending on an estimate of the local distribution of the class
    to be oversampled.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    n_neighbors : int or estimator object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    nn_ : estimator object
        Validated K-nearest Neighbours estimator linked to the parameter `n_neighbors`.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE variant.
    Notes
    -----
    The implementation is based on [1]_.
    Supports multi-class resampling. A one-vs.-rest scheme is used.
    References
    ----------
    .. [1] He, Haibo, Yang Bai, Edwardo A. Garcia, and Shutao Li. "ADASYN:
       Adaptive synthetic sampling approach for imbalanced learning," In IEEE
       International Joint Conference on Neural Networks (IEEE World Congress
       on Computational Intelligence), pp. 1322-1328, 2008.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import ADASYN
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000,
    ... random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> ada = ADASYN(random_state=42)
    >>> X_res, y_res = ada.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 904, 1: 900}})
    """
    # Constraints consumed by the scikit-learn-style parameter validation.
    _parameter_constraints: dict = {
        **BaseOverSampler._parameter_constraints,
        "n_neighbors": [
            Interval(numbers.Integral, 1, None, closed="left"),
            HasMethods(["kneighbors", "kneighbors_graph"]),
        ],
        "n_jobs": [numbers.Integral, None],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        n_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(sampling_strategy=sampling_strategy)
        self.random_state = random_state
        self.n_neighbors = n_neighbors
        self.n_jobs = n_jobs
    def _validate_estimator(self):
        """Create the necessary objects for ADASYN"""
        # One extra neighbor is requested because `kneighbors` on the training
        # data returns each sample as its own first neighbor, which is sliced
        # away (`[:, 1:]`) in `_fit_resample`.
        self.nn_ = check_neighbors_object(
            "n_neighbors", self.n_neighbors, additional_neighbor=1
        )
    def _fit_resample(self, X, y):
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self._validate_estimator()
        random_state = check_random_state(self.random_state)
        X_resampled = [X.copy()]
        y_resampled = [y.copy()]
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            # Neighbors are searched in the whole dataset to estimate, for each
            # sample of the class, the proportion of neighbors from the other
            # classes (the adaptive sampling weight of the ADASYN paper).
            self.nn_.fit(X)
            nns = self.nn_.kneighbors(X_class, return_distance=False)[:, 1:]
            # The ratio is computed using a one-vs-rest manner. Using majority
            # in multi-class would lead to slightly different results at the
            # cost of introducing a new parameter.
            n_neighbors = self.nn_.n_neighbors - 1
            ratio_nn = np.sum(y[nns] != class_sample, axis=1) / n_neighbors
            if not np.sum(ratio_nn):
                # BUGFIX: "neigbours" was misspelled in the error message.
                raise RuntimeError(
                    "Not any neighbours belong to the majority"
                    " class. This case will induce a NaN case"
                    " with a division by zero. ADASYN is not"
                    " suited for this specific dataset."
                    " Use SMOTE instead."
                )
            ratio_nn /= np.sum(ratio_nn)
            n_samples_generate = np.rint(ratio_nn * n_samples).astype(int)
            # rounding may cause new amount for n_samples
            n_samples = np.sum(n_samples_generate)
            if not n_samples:
                raise ValueError(
                    "No samples will be generated with the provided ratio settings."
                )
            # the nearest neighbors need to be fitted only on the current class
            # to find the class NN to generate new samples
            self.nn_.fit(X_class)
            nns = self.nn_.kneighbors(X_class, return_distance=False)[:, 1:]
            enumerated_class_indices = np.arange(len(target_class_indices))
            rows = np.repeat(enumerated_class_indices, n_samples_generate)
            cols = random_state.choice(n_neighbors, size=n_samples)
            # SMOTE-style interpolation between each seed sample and one of its
            # within-class neighbors: new = x + step * (neighbor - x).
            diffs = X_class[nns[rows, cols]] - X_class[rows]
            steps = random_state.uniform(size=(n_samples, 1))
            if sparse.issparse(X):
                sparse_func = type(X).__name__
                steps = getattr(sparse, sparse_func)(steps)
                X_new = X_class[rows] + steps.multiply(diffs)
            else:
                X_new = X_class[rows] + steps * diffs
            X_new = X_new.astype(X.dtype)
            y_new = np.full(n_samples, fill_value=class_sample, dtype=y.dtype)
            X_resampled.append(X_new)
            y_resampled.append(y_new)
        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)
        return X_resampled, y_resampled
    def _more_tags(self):
        return {
            "X_types": ["2darray"],
        }
| 8,545 | 35.521368 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/tests/test_common.py | from collections import Counter
import numpy as np
import pytest
from sklearn.cluster import MiniBatchKMeans
from imblearn.over_sampling import (
ADASYN,
SMOTE,
SMOTEN,
SMOTENC,
SVMSMOTE,
BorderlineSMOTE,
KMeansSMOTE,
)
from imblearn.utils.testing import _CustomNearestNeighbors
@pytest.fixture
def numerical_data():
    """Deterministic 100x2 numerical dataset with a binary target."""
    random_state = np.random.RandomState(0)
    labels = [0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0]
    return random_state.randn(100, 2), np.repeat(labels, 5)
@pytest.fixture
def categorical_data():
    """Shuffled categorical feature matrix with string class labels."""
    random_state = np.random.RandomState(0)
    columns = [
        ["A"] * 10 + ["B"] * 20 + ["C"] * 30,
        ["A"] * 40 + ["B"] * 20,
        ["A"] * 20 + ["B"] * 20 + ["C"] * 10 + ["D"] * 10,
    ]
    X = np.array(columns, dtype=object).T
    random_state.shuffle(X)
    class_names = np.array(["not apple", "apple"], dtype=object)
    y = class_names[np.array([0] * 20 + [1] * 40, dtype=np.int32)]
    return X, y
@pytest.fixture
def heterogeneous_data():
    """Mixed numerical/categorical matrix plus the categorical column indices."""
    random_state = np.random.RandomState(42)
    X = np.empty((30, 4), dtype=object)
    X[:, :2] = random_state.randn(30, 2)
    X[:, 2] = random_state.choice(["a", "b", "c"], size=30).astype(object)
    X[:, 3] = random_state.randint(3, size=30)
    categorical_columns = [2, 3]
    return X, np.array([0] * 10 + [1] * 20), categorical_columns
@pytest.mark.parametrize(
    "smote", [BorderlineSMOTE(), SVMSMOTE()], ids=["borderline", "svm"]
)
def test_smote_m_neighbors(numerical_data, smote):
    """Both internal NN estimators get the +1 neighbour added after fitting.
    Regression test for:
    https://github.com/scikit-learn-contrib/imbalanced-learn/issues/568
    """
    X, y = numerical_data
    smote.fit_resample(X, y)
    assert smote.nn_k_.n_neighbors == 6
    assert smote.nn_m_.n_neighbors == 11
@pytest.mark.parametrize(
    "smote, neighbor_estimator_name",
    [
        (ADASYN(random_state=0), "n_neighbors"),
        (BorderlineSMOTE(random_state=0), "k_neighbors"),
        (
            KMeansSMOTE(
                kmeans_estimator=MiniBatchKMeans(n_init=1, random_state=0),
                random_state=1,
            ),
            "k_neighbors",
        ),
        (SMOTE(random_state=0), "k_neighbors"),
        (SVMSMOTE(random_state=0), "k_neighbors"),
    ],
    ids=["adasyn", "borderline", "kmeans", "smote", "svm"],
)
def test_numerical_smote_custom_nn(numerical_data, smote, neighbor_estimator_name):
    """Every SMOTE variant accepts a custom nearest-neighbors estimator."""
    X, y = numerical_data
    smote.set_params(
        **{neighbor_estimator_name: _CustomNearestNeighbors(n_neighbors=5)}
    )
    X_res, _ = smote.fit_resample(X, y)
    # at least the 100 original samples plus synthetic minority samples
    assert X_res.shape[0] >= 120
def test_categorical_smote_k_custom_nn(categorical_data):
    """SMOTEN accepts a custom `k_neighbors` estimator object."""
    X, y = categorical_data
    sampler = SMOTEN(k_neighbors=_CustomNearestNeighbors(n_neighbors=5))
    X_res, y_res = sampler.fit_resample(X, y)
    # both classes balanced at 40 samples, feature count unchanged
    assert Counter(y_res) == {"apple": 40, "not apple": 40}
    assert X_res.shape == (80, 3)
def test_heterogeneous_smote_k_custom_nn(heterogeneous_data):
    """SMOTENC accepts a custom `k_neighbors` estimator object."""
    X, y, categorical_features = heterogeneous_data
    sampler = SMOTENC(
        categorical_features, k_neighbors=_CustomNearestNeighbors(n_neighbors=5)
    )
    X_res, y_res = sampler.fit_resample(X, y)
    # both classes balanced at 20 samples, feature count unchanged
    assert Counter(y_res) == {0: 20, 1: 20}
    assert X_res.shape == (40, 4)
@pytest.mark.parametrize(
    "smote",
    [BorderlineSMOTE(random_state=0), SVMSMOTE(random_state=0)],
    ids=["borderline", "svm"],
)
def test_numerical_smote_extra_custom_nn(numerical_data, smote):
    """`m_neighbors` can also be a custom nearest-neighbors estimator."""
    X, y = numerical_data
    custom_nn = _CustomNearestNeighbors(n_neighbors=5)
    smote.set_params(m_neighbors=custom_nn)
    resampled_X, resampled_y = smote.fit_resample(X, y)
    assert resampled_X.shape == (120, 2)
    assert Counter(resampled_y) == {0: 60, 1: 60}
# FIXME: to be removed in 0.12
@pytest.mark.parametrize(
    "sampler",
    [
        ADASYN(random_state=0),
        BorderlineSMOTE(random_state=0),
        SMOTE(random_state=0),
        SMOTEN(random_state=0),
        SMOTENC([0], random_state=0),
        SVMSMOTE(random_state=0),
    ],
)
def test_n_jobs_deprecation_warning(numerical_data, sampler):
    """Setting the deprecated `n_jobs` parameter triggers a FutureWarning."""
    X, y = numerical_data
    sampler.set_params(n_jobs=2)
    with pytest.warns(
        FutureWarning, match="The parameter `n_jobs` has been deprecated"
    ):
        sampler.fit_resample(X, y)
| 4,235 | 28.013699 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/tests/test_random_over_sampler.py | """Test the module under sampler."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
from datetime import datetime
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.utils._testing import (
_convert_container,
assert_allclose,
assert_array_equal,
)
from imblearn.over_sampling import RandomOverSampler
# Seed shared across the tests so the resampling output is reproducible.
RND_SEED = 0
@pytest.fixture
def data():
    """10-sample 2D dataset: 3 samples of class 0 and 7 of class 1."""
    X = np.array(
        [
            [0.04352327, -0.20515826],
            [0.92923648, 0.76103773],
            [0.20792588, 1.49407907],
            [0.47104475, 0.44386323],
            [0.22950086, 0.33367433],
            [0.15490546, 0.3130677],
            [0.09125309, -0.85409574],
            [0.12372842, 0.6536186],
            [0.13347175, 0.12167502],
            [0.094035, -2.55298982],
        ]
    )
    Y = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
    return X, Y
def test_ros_init():
    """The constructor stores `random_state` unchanged."""
    sampler = RandomOverSampler(sampling_strategy="auto", random_state=RND_SEED)
    assert sampler.random_state == RND_SEED
@pytest.mark.parametrize(
    "params", [{"shrinkage": None}, {"shrinkage": 0}, {"shrinkage": {0: 0}}]
)
@pytest.mark.parametrize("X_type", ["array", "dataframe"])
def test_ros_fit_resample(X_type, data, params):
    # A zero-valued shrinkage (scalar or per-class) must behave exactly like a
    # plain bootstrap: minority samples are duplicated, never perturbed.
    X, Y = data
    X_ = _convert_container(X, X_type)
    ros = RandomOverSampler(**params, random_state=RND_SEED)
    X_resampled, y_resampled = ros.fit_resample(X_, Y)
    X_gt = np.array(
        [
            [0.04352327, -0.20515826],
            [0.92923648, 0.76103773],
            [0.20792588, 1.49407907],
            [0.47104475, 0.44386323],
            [0.22950086, 0.33367433],
            [0.15490546, 0.3130677],
            [0.09125309, -0.85409574],
            [0.12372842, 0.6536186],
            [0.13347175, 0.12167502],
            [0.094035, -2.55298982],
            [0.92923648, 0.76103773],
            [0.47104475, 0.44386323],
            [0.92923648, 0.76103773],
            [0.47104475, 0.44386323],
        ]
    )
    y_gt = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
    if X_type == "dataframe":
        assert hasattr(X_resampled, "loc")
        # FIXME: we should use to_numpy with pandas >= 0.25
        X_resampled = X_resampled.values
    assert_allclose(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
    if params["shrinkage"] is None:
        assert ros.shrinkage_ is None
    else:
        assert ros.shrinkage_ == {0: 0}
@pytest.mark.parametrize("params", [{"shrinkage": None}, {"shrinkage": 0}])
def test_ros_fit_resample_half(data, params):
    # With a sampling strategy equal to the original class counts, the data
    # must come back unchanged.
    X, Y = data
    sampling_strategy = {0: 3, 1: 7}
    ros = RandomOverSampler(
        **params, sampling_strategy=sampling_strategy, random_state=RND_SEED
    )
    X_resampled, y_resampled = ros.fit_resample(X, Y)
    X_gt = np.array(
        [
            [0.04352327, -0.20515826],
            [0.92923648, 0.76103773],
            [0.20792588, 1.49407907],
            [0.47104475, 0.44386323],
            [0.22950086, 0.33367433],
            [0.15490546, 0.3130677],
            [0.09125309, -0.85409574],
            [0.12372842, 0.6536186],
            [0.13347175, 0.12167502],
            [0.094035, -2.55298982],
        ]
    )
    y_gt = np.array([1, 0, 1, 0, 1, 1, 1, 1, 0, 1])
    assert_allclose(X_resampled, X_gt)
    assert_array_equal(y_resampled, y_gt)
    if params["shrinkage"] is None:
        assert ros.shrinkage_ is None
    else:
        assert ros.shrinkage_ == {0: 0, 1: 0}
@pytest.mark.parametrize("params", [{"shrinkage": None}, {"shrinkage": 0}])
def test_multiclass_fit_resample(data, params):
    """Random over-sampling equalizes all classes in a 3-class problem."""
    X, Y = data
    y = Y.copy()
    y[[5, 6]] = 2
    sampler = RandomOverSampler(**params, random_state=RND_SEED)
    _, y_resampled = sampler.fit_resample(X, y)
    class_counts = Counter(y_resampled)
    assert class_counts[0] == class_counts[1] == class_counts[2] == 5
    if params["shrinkage"] is None:
        assert sampler.shrinkage_ is None
    else:
        assert sampler.shrinkage_ == {0: 0, 2: 0}
def test_random_over_sampling_heterogeneous_data():
    """Plain bootstrap resampling supports object arrays with mixed dtypes."""
    X_hetero = np.array(
        [["xxx", 1, 1.0], ["yyy", 2, 2.0], ["zzz", 3, 3.0]], dtype=object
    )
    y = np.array([0, 0, 1])
    sampler = RandomOverSampler(random_state=RND_SEED)
    X_res, y_res = sampler.fit_resample(X_hetero, y)
    assert X_res.shape[0] == 4
    assert y_res.shape[0] == 4
    assert X_res.dtype == object
    # the extra sample is a duplicate drawn from the original rows
    assert X_res[-1, 0] in X_hetero[:, 0]
def test_random_over_sampling_nan_inf(data):
    """Oversampling still works when X contains NaN or infinite values.
    Regression test for #605.
    """
    X, Y = data
    random_state = np.random.RandomState(42)
    n_corrupted = X.shape[0] // 3
    rows = random_state.choice(np.arange(X.shape[0]), size=n_corrupted)
    cols = random_state.randint(0, X.shape[1], size=n_corrupted)
    bad_values = random_state.choice([np.nan, np.inf], size=n_corrupted)
    X_corrupted = X.copy()
    X_corrupted[rows, cols] = bad_values
    sampler = RandomOverSampler(random_state=0)
    X_res, y_res = sampler.fit_resample(X_corrupted, Y)
    assert y_res.shape == (14,)
    assert X_res.shape == (14, 2)
    # the non-finite entries survive the resampling untouched
    assert np.any(~np.isfinite(X_res))
def test_random_over_sampling_heterogeneous_data_smoothed_bootstrap():
    """Requesting a smoothed bootstrap on non-numerical data raises."""
    X_hetero = np.array(
        [["xxx", 1, 1.0], ["yyy", 2, 2.0], ["zzz", 3, 3.0]], dtype=object
    )
    y = np.array([0, 0, 1])
    sampler = RandomOverSampler(shrinkage=1, random_state=RND_SEED)
    expected = "When shrinkage is not None, X needs to contain only numerical"
    with pytest.raises(ValueError, match=expected):
        sampler.fit_resample(X_hetero, y)
@pytest.mark.parametrize("X_type", ["dataframe", "array", "sparse_csr", "sparse_csc"])
def test_random_over_sampler_smoothed_bootstrap(X_type, data):
    """A smoothed bootstrap works on arrays, dataframes and sparse matrices."""
    X, y = data
    X_container = _convert_container(X, X_type)
    sampler = RandomOverSampler(shrinkage=1)
    X_res, y_res = sampler.fit_resample(X_container, y)
    assert y_res.shape == (14,)
    assert X_res.shape == (14, 2)
    if X_type == "dataframe":
        # dataframe in, dataframe out
        assert hasattr(X_res, "loc")
def test_random_over_sampler_equivalence_shrinkage(data):
    """A shrinkage factor of 0 is equivalent to a plain (hard) bootstrap."""
    X, y = data
    X_shrink_0, y_shrink_0 = RandomOverSampler(
        shrinkage=0, random_state=0
    ).fit_resample(X, y)
    X_hard, y_hard = RandomOverSampler(
        shrinkage=None, random_state=0
    ).fit_resample(X, y)
    assert_allclose(X_shrink_0, X_hard)
    assert_allclose(y_shrink_0, y_hard)
    for X_res, y_res in ((X_shrink_0, y_shrink_0), (X_hard, y_hard)):
        assert X_res.shape == (14, 2)
        assert y_res.shape == (14,)
def test_random_over_sampler_shrinkage_behaviour(data):
    """A larger shrinkage factor produces a more dispersed smoothed bootstrap."""
    X, y = data
    sampler = RandomOverSampler(shrinkage=1, random_state=0)
    X_res_1, y_res_1 = sampler.fit_resample(X, y)
    sampler.set_params(shrinkage=5)
    X_res_5, y_res_5 = sampler.fit_resample(X, y)
    # compare the generalized variance of class 0 under both factors
    dispersion_1 = np.linalg.det(np.cov(X_res_1[y_res_1 == 0].T))
    dispersion_5 = np.linalg.det(np.cov(X_res_5[y_res_5 == 0].T))
    assert dispersion_1 < dispersion_5
@pytest.mark.parametrize(
    "shrinkage, err_msg",
    [
        ({}, "`shrinkage` should contain a shrinkage factor for each class"),
        ({0: -1}, "The shrinkage factor needs to be >= 0"),
    ],
)
def test_random_over_sampler_shrinkage_error(data, shrinkage, err_msg):
    """Invalid `shrinkage` dictionaries are rejected at fit time."""
    X, y = data
    sampler = RandomOverSampler(shrinkage=shrinkage)
    with pytest.raises(ValueError, match=err_msg):
        sampler.fit_resample(X, y)
@pytest.mark.parametrize(
    "sampling_strategy", ["auto", "minority", "not minority", "not majority", "all"]
)
def test_random_over_sampler_strings(sampling_strategy):
    """Check that we support all supposed strings as `sampling_strategy` in
    a sampler inheriting from `BaseOverSampler`."""
    X, y = make_classification(
        n_samples=100,
        n_clusters_per_class=1,
        n_classes=3,
        weights=[0.1, 0.3, 0.6],
        random_state=0,
    )
    sampler = RandomOverSampler(sampling_strategy=sampling_strategy)
    sampler.fit_resample(X, y)
def test_random_over_sampling_datetime():
    """Resampling a dataframe keeps dtypes (incl. datetime) and aligns indexes."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame({"label": [0, 0, 0, 1], "td": [datetime.now()] * 4})
    y = X["label"]
    sampler = RandomOverSampler(random_state=0)
    X_res, y_res = sampler.fit_resample(X, y)
    pd.testing.assert_series_equal(X_res.dtypes, X.dtypes)
    pd.testing.assert_index_equal(X_res.index, y_res.index)
    assert_array_equal(y_res.to_numpy(), np.array([0, 0, 0, 1, 1, 1]))
| 9,430 | 31.52069 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/tests/test_adasyn.py | """Test the module under sampler."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.over_sampling import ADASYN
# Seed shared across the tests so the resampling output is reproducible.
RND_SEED = 0
# 20-sample 2D dataset with a binary, slightly imbalanced target (8 vs 12).
X = np.array(
    [
        [0.11622591, -0.0317206],
        [0.77481731, 0.60935141],
        [1.25192108, -0.22367336],
        [0.53366841, -0.30312976],
        [1.52091956, -0.49283504],
        [-0.28162401, -2.10400981],
        [0.83680821, 1.72827342],
        [0.3084254, 0.33299982],
        [0.70472253, -0.73309052],
        [0.28893132, -0.38761769],
        [1.15514042, 0.0129463],
        [0.88407872, 0.35454207],
        [1.31301027, -0.92648734],
        [-1.11515198, -0.93689695],
        [-0.18410027, -0.45194484],
        [0.9281014, 0.53085498],
        [-0.14374509, 0.27370049],
        [-0.41635887, -0.38299653],
        [0.08711622, 0.93259929],
        [1.70580611, -0.11219234],
    ]
)
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
# Relative tolerance used when comparing resampled arrays.
R_TOL = 1e-4
def test_ada_init():
    """The constructor stores `random_state` unchanged."""
    sampler = ADASYN(sampling_strategy="auto", random_state=RND_SEED)
    assert sampler.random_state == RND_SEED
def test_ada_fit_resample():
    # The first 20 rows of X_gt are the input; the last 4 are the synthetic
    # class-0 samples generated by ADASYN with the fixed seed.
    ada = ADASYN(random_state=RND_SEED)
    X_resampled, y_resampled = ada.fit_resample(X, Y)
    X_gt = np.array(
        [
            [0.11622591, -0.0317206],
            [0.77481731, 0.60935141],
            [1.25192108, -0.22367336],
            [0.53366841, -0.30312976],
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.3084254, 0.33299982],
            [0.70472253, -0.73309052],
            [0.28893132, -0.38761769],
            [1.15514042, 0.0129463],
            [0.88407872, 0.35454207],
            [1.31301027, -0.92648734],
            [-1.11515198, -0.93689695],
            [-0.18410027, -0.45194484],
            [0.9281014, 0.53085498],
            [-0.14374509, 0.27370049],
            [-0.41635887, -0.38299653],
            [0.08711622, 0.93259929],
            [1.70580611, -0.11219234],
            [0.88161986, -0.2829741],
            [0.35681689, -0.18814597],
            [1.4148276, 0.05308106],
            [0.3136591, -0.31327875],
        ]
    )
    y_gt = np.array(
        [0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0]
    )
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
def test_ada_fit_resample_nn_obj():
    # Passing an explicit NearestNeighbors estimator must give exactly the
    # same output as the default integer `n_neighbors` parameter.
    nn = NearestNeighbors(n_neighbors=6)
    ada = ADASYN(random_state=RND_SEED, n_neighbors=nn)
    X_resampled, y_resampled = ada.fit_resample(X, Y)
    X_gt = np.array(
        [
            [0.11622591, -0.0317206],
            [0.77481731, 0.60935141],
            [1.25192108, -0.22367336],
            [0.53366841, -0.30312976],
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.3084254, 0.33299982],
            [0.70472253, -0.73309052],
            [0.28893132, -0.38761769],
            [1.15514042, 0.0129463],
            [0.88407872, 0.35454207],
            [1.31301027, -0.92648734],
            [-1.11515198, -0.93689695],
            [-0.18410027, -0.45194484],
            [0.9281014, 0.53085498],
            [-0.14374509, 0.27370049],
            [-0.41635887, -0.38299653],
            [0.08711622, 0.93259929],
            [1.70580611, -0.11219234],
            [0.88161986, -0.2829741],
            [0.35681689, -0.18814597],
            [1.4148276, 0.05308106],
            [0.3136591, -0.31327875],
        ]
    )
    y_gt = np.array(
        [0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0]
    )
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
| 3,969 | 31.540984 | 80 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/base.py | """Base class and original SMOTE methods for over-sampling"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# Dzianis Dudnik
# License: MIT
import math
import numbers
import warnings
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.exceptions import DataConversionWarning
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.utils import (
_get_column_indices,
_safe_indexing,
check_array,
check_random_state,
)
from sklearn.utils.sparsefuncs_fast import (
csr_mean_variance_axis0,
)
from sklearn.utils.validation import _num_features
from ...metrics.pairwise import ValueDifferenceMetric
from ...utils import Substitution, check_neighbors_object, check_target_type
from ...utils._docstring import _n_jobs_docstring, _random_state_docstring
from ...utils._param_validation import HasMethods, Interval, StrOptions
from ...utils._validation import _check_X
from ...utils.fixes import _is_pandas_df, _mode
from ..base import BaseOverSampler
class BaseSMOTE(BaseOverSampler):
"""Base class for the different SMOTE algorithms."""
_parameter_constraints: dict = {
**BaseOverSampler._parameter_constraints,
"k_neighbors": [
Interval(numbers.Integral, 1, None, closed="left"),
HasMethods(["kneighbors", "kneighbors_graph"]),
],
"n_jobs": [numbers.Integral, None],
}
def __init__(
self,
sampling_strategy="auto",
random_state=None,
k_neighbors=5,
n_jobs=None,
):
super().__init__(sampling_strategy=sampling_strategy)
self.random_state = random_state
self.k_neighbors = k_neighbors
self.n_jobs = n_jobs
    def _validate_estimator(self):
        """Check the NN estimators shared across the different SMOTE
        algorithms.
        """
        # additional_neighbor=1: presumably compensates for `kneighbors`
        # returning each training sample as its own first neighbour — confirm
        # against `check_neighbors_object`.
        self.nn_k_ = check_neighbors_object(
            "k_neighbors", self.k_neighbors, additional_neighbor=1
        )
def _make_samples(
self, X, y_dtype, y_type, nn_data, nn_num, n_samples, step_size=1.0, y=None
):
"""A support function that returns artificial samples constructed along
the line connecting nearest neighbours.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Points from which the points will be created.
y_dtype : dtype
The data type of the targets.
y_type : str or int
The minority target value, just so the function can return the
target values for the synthetic variables with correct length in
a clear format.
nn_data : ndarray of shape (n_samples_all, n_features)
Data set carrying all the neighbours to be used
nn_num : ndarray of shape (n_samples_all, k_nearest_neighbours)
The nearest neighbours of each sample in `nn_data`.
n_samples : int
The number of samples to generate.
step_size : float, default=1.0
The step size to create samples.
y : ndarray of shape (n_samples_all,), default=None
The true target associated with `nn_data`. Used by Borderline SMOTE-2 to
weight the distances in the sample generation process.
Returns
-------
X_new : {ndarray, sparse matrix} of shape (n_samples_new, n_features)
Synthetically generated samples.
y_new : ndarray of shape (n_samples_new,)
Target values for synthetic samples.
"""
random_state = check_random_state(self.random_state)
samples_indices = random_state.randint(low=0, high=nn_num.size, size=n_samples)
# np.newaxis for backwards compatability with random_state
steps = step_size * random_state.uniform(size=n_samples)[:, np.newaxis]
rows = np.floor_divide(samples_indices, nn_num.shape[1])
cols = np.mod(samples_indices, nn_num.shape[1])
X_new = self._generate_samples(X, nn_data, nn_num, rows, cols, steps, y_type, y)
y_new = np.full(n_samples, fill_value=y_type, dtype=y_dtype)
return X_new, y_new
def _generate_samples(
self, X, nn_data, nn_num, rows, cols, steps, y_type=None, y=None
):
r"""Generate a synthetic sample.
The rule for the generation is:
.. math::
\mathbf{s_{s}} = \mathbf{s_{i}} + \mathcal{u}(0, 1) \times
(\mathbf{s_{i}} - \mathbf{s_{nn}}) \,
where \mathbf{s_{s}} is the new synthetic samples, \mathbf{s_{i}} is
the current sample, \mathbf{s_{nn}} is a randomly selected neighbors of
\mathbf{s_{i}} and \mathcal{u}(0, 1) is a random number between [0, 1).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Points from which the points will be created.
nn_data : ndarray of shape (n_samples_all, n_features)
Data set carrying all the neighbours to be used.
nn_num : ndarray of shape (n_samples_all, k_nearest_neighbours)
The nearest neighbours of each sample in `nn_data`.
rows : ndarray of shape (n_samples,), dtype=int
Indices pointing at feature vector in X which will be used
as a base for creating new samples.
cols : ndarray of shape (n_samples,), dtype=int
Indices pointing at which nearest neighbor of base feature vector
will be used when creating new samples.
steps : ndarray of shape (n_samples,), dtype=float
Step sizes for new samples.
y_type : str, int or None, default=None
Class label of the current target classes for which we want to generate
samples.
y : ndarray of shape (n_samples_all,), default=None
The true target associated with `nn_data`. Used by Borderline SMOTE-2 to
weight the distances in the sample generation process.
Returns
-------
X_new : {ndarray, sparse matrix} of shape (n_samples, n_features)
Synthetically generated samples.
"""
diffs = nn_data[nn_num[rows, cols]] - X[rows]
if y is not None: # only entering for BorderlineSMOTE-2
random_state = check_random_state(self.random_state)
mask_pair_samples = y[nn_num[rows, cols]] != y_type
diffs[mask_pair_samples] *= random_state.uniform(
low=0.0, high=0.5, size=(mask_pair_samples.sum(), 1)
)
if sparse.issparse(X):
sparse_func = type(X).__name__
steps = getattr(sparse, sparse_func)(steps)
X_new = X[rows] + steps.multiply(diffs)
else:
X_new = X[rows] + steps * diffs
return X_new.astype(X.dtype)
def _in_danger_noise(self, nn_estimator, samples, target_class, y, kind="danger"):
"""Estimate if a set of sample are in danger or noise.
Used by BorderlineSMOTE and SVMSMOTE.
Parameters
----------
nn_estimator : estimator object
An estimator that inherits from
:class:`~sklearn.neighbors.base.KNeighborsMixin` use to determine
if a sample is in danger/noise.
samples : {array-like, sparse matrix} of shape (n_samples, n_features)
The samples to check if either they are in danger or not.
target_class : int or str
The target corresponding class being over-sampled.
y : array-like of shape (n_samples,)
The true label in order to check the neighbour labels.
kind : {'danger', 'noise'}, default='danger'
The type of classification to use. Can be either:
- If 'danger', check if samples are in danger,
- If 'noise', check if samples are noise.
Returns
-------
output : ndarray of shape (n_samples,)
A boolean array where True refer to samples in danger or noise.
"""
x = nn_estimator.kneighbors(samples, return_distance=False)[:, 1:]
nn_label = (y[x] != target_class).astype(int)
n_maj = np.sum(nn_label, axis=1)
if kind == "danger":
# Samples are in danger for m/2 <= m' < m
return np.bitwise_and(
n_maj >= (nn_estimator.n_neighbors - 1) / 2,
n_maj < nn_estimator.n_neighbors - 1,
)
else: # kind == "noise":
# Samples are noise for m = m'
return n_maj == nn_estimator.n_neighbors - 1
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SMOTE(BaseSMOTE):
    """Class to perform over-sampling using SMOTE.
    This object is an implementation of SMOTE - Synthetic Minority
    Over-sampling Technique as presented in [1]_.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    nn_k_ : estimator object
        Validated k-nearest neighbours created from the `k_neighbors` parameter.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.
    BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
    SVMSMOTE : Over-sample using the SVM-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original papers: [1]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import SMOTE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sm = SMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
    def _fit_resample(self, X, y):
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self._validate_estimator()
        # Start from the original data; synthetic batches are appended per class.
        X_resampled = [X.copy()]
        y_resampled = [y.copy()]
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            # Fit the k-NN on the current class only; drop the first neighbour
            # returned for each query point since it is the point itself.
            self.nn_k_.fit(X_class)
            nns = self.nn_k_.kneighbors(X_class, return_distance=False)[:, 1:]
            X_new, y_new = self._make_samples(
                X_class, y.dtype, class_sample, X_class, nns, n_samples, 1.0
            )
            X_resampled.append(X_new)
            y_resampled.append(y_new)
        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)
        return X_resampled, y_resampled
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SMOTENC(SMOTE):
    """Synthetic Minority Over-sampling Technique for Nominal and Continuous.
    Unlike :class:`SMOTE`, SMOTE-NC for dataset containing numerical and
    categorical features. However, it is not designed to work with only
    categorical features.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    .. versionadded:: 0.4
    Parameters
    ----------
    categorical_features : "auto" or array-like of shape (n_cat_features,) or \
            (n_features,), dtype={{bool, int, str}}
        Specified which features are categorical. Can either be:
        - "auto" (default) to automatically detect categorical features. Only
          supported when `X` is a :class:`pandas.DataFrame` and it corresponds
          to columns that have a :class:`pandas.CategoricalDtype`;
        - array of `int` corresponding to the indices specifying the categorical
          features;
        - array of `str` corresponding to the feature names. `X` should be a pandas
          :class:`pandas.DataFrame` in this case.
        - mask array of shape (n_features, ) and ``bool`` dtype for which
          ``True`` indicates the categorical features.
    categorical_encoder : estimator, default=None
        One-hot encoder used to encode the categorical features. If `None`, a
        :class:`~sklearn.preprocessing.OneHotEncoder` is used with default parameters
        apart from `handle_unknown` which is set to 'ignore'.
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    nn_k_ : estimator object
        Validated k-nearest neighbours created from the `k_neighbors` parameter.
    ohe_ : :class:`~sklearn.preprocessing.OneHotEncoder`
        The one-hot encoder used to encode the categorical features.
        .. deprecated:: 0.11
           `ohe_` is deprecated in 0.11 and will be removed in 0.13. Use
           `categorical_encoder_` instead.
    categorical_encoder_ : estimator
        The encoder used to encode the categorical features.
    categorical_features_ : ndarray of shape (n_cat_features,), dtype=np.int64
        Indices of the categorical features.
    continuous_features_ : ndarray of shape (n_cont_features,), dtype=np.int64
        Indices of the continuous features.
    median_std_ : dict of int -> float
        Median of the standard deviation of the continuous features for each
        class to be over-sampled.
    n_features_ : int
        Number of features observed at `fit`.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original paper [1]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    See
    :ref:`sphx_glr_auto_examples_over-sampling_plot_comparison_over_sampling.py`,
    and
    :ref:`sphx_glr_auto_examples_over-sampling_plot_illustration_generation_sample.py`.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    Examples
    --------
    >>> from collections import Counter
    >>> from numpy.random import RandomState
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import SMOTENC
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print(f'Original dataset shape {{X.shape}}')
    Original dataset shape (1000, 20)
    >>> print(f'Original dataset samples per class {{Counter(y)}}')
    Original dataset samples per class Counter({{1: 900, 0: 100}})
    >>> # simulate the 2 last columns to be categorical features
    >>> X[:, -2:] = RandomState(10).randint(0, 4, size=(1000, 2))
    >>> sm = SMOTENC(random_state=42, categorical_features=[18, 19])
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print(f'Resampled dataset samples per class {{Counter(y_res)}}')
    Resampled dataset samples per class Counter({{0: 900, 1: 900}})
    """
    _required_parameters = ["categorical_features"]
    _parameter_constraints: dict = {
        **SMOTE._parameter_constraints,
        "categorical_features": ["array-like", StrOptions({"auto"})],
        "categorical_encoder": [
            HasMethods(["fit_transform", "inverse_transform"]),
            None,
        ],
    }
    def __init__(
        self,
        categorical_features,
        *,
        categorical_encoder=None,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        self.categorical_features = categorical_features
        self.categorical_encoder = categorical_encoder
    def _check_X_y(self, X, y):
        """Overwrite the checking to let pass some string for categorical
        features.
        """
        y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
        X = _check_X(X)
        self._check_n_features(X, reset=True)
        self._check_feature_names(X, reset=True)
        return X, y, binarize_y
    def _validate_column_types(self, X):
        """Compute the indices of the categorical and continuous features."""
        if self.categorical_features == "auto":
            if not _is_pandas_df(X):
                raise ValueError(
                    "When `categorical_features='auto'`, the input data "
                    f"should be a pandas.DataFrame. Got {type(X)} instead."
                )
            import pandas as pd  # safely import pandas now
            # Columns with a CategoricalDtype are treated as categorical;
            # everything else is continuous.
            are_columns_categorical = np.array(
                [isinstance(col_dtype, pd.CategoricalDtype) for col_dtype in X.dtypes]
            )
            self.categorical_features_ = np.flatnonzero(are_columns_categorical)
            self.continuous_features_ = np.flatnonzero(~are_columns_categorical)
        else:
            self.categorical_features_ = np.array(
                _get_column_indices(X, self.categorical_features)
            )
            self.continuous_features_ = np.setdiff1d(
                np.arange(self.n_features_), self.categorical_features_
            )
    def _validate_estimator(self):
        super()._validate_estimator()
        # SMOTE-NC needs a mix of feature kinds: all-categorical data should
        # use SMOTEN, all-numerical data should use plain SMOTE.
        if self.categorical_features_.size == self.n_features_in_:
            raise ValueError(
                "SMOTE-NC is not designed to work only with categorical "
                "features. It requires some numerical features."
            )
        elif self.categorical_features_.size == 0:
            raise ValueError(
                "SMOTE-NC is not designed to work only with numerical "
                "features. It requires some categorical features."
            )
    def _fit_resample(self, X, y):
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self.n_features_ = _num_features(X)
        self._validate_column_types(X)
        self._validate_estimator()
        X_continuous = _safe_indexing(X, self.continuous_features_, axis=1)
        X_continuous = check_array(X_continuous, accept_sparse=["csr", "csc"])
        X_categorical = _safe_indexing(X, self.categorical_features_, axis=1)
        # Reuse the continuous dtype for the one-hot columns when possible so
        # the stacked encoded matrix has a single numeric dtype.
        if X_continuous.dtype.name != "object":
            dtype_ohe = X_continuous.dtype
        else:
            dtype_ohe = np.float64
        if self.categorical_encoder is None:
            self.categorical_encoder_ = OneHotEncoder(
                handle_unknown="ignore", dtype=dtype_ohe
            )
        else:
            self.categorical_encoder_ = clone(self.categorical_encoder)
        # the input of the OneHotEncoder needs to be dense
        X_ohe = self.categorical_encoder_.fit_transform(
            X_categorical.toarray() if sparse.issparse(X_categorical) else X_categorical
        )
        if not sparse.issparse(X_ohe):
            X_ohe = sparse.csr_matrix(X_ohe, dtype=dtype_ohe)
        # Work in the encoded space: [continuous | one-hot categorical], CSR.
        # Continuous columns always come first; order is restored at the end.
        X_encoded = sparse.hstack((X_continuous, X_ohe), format="csr", dtype=dtype_ohe)
        X_resampled = [X_encoded.copy()]
        y_resampled = [y.copy()]
        # SMOTE resampling starts here
        self.median_std_ = {}
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X_encoded, target_class_indices)
            _, var = csr_mean_variance_axis0(
                X_class[:, : self.continuous_features_.size]
            )
            self.median_std_[class_sample] = np.median(np.sqrt(var))
            # In the edge case where the median of the std is equal to 0, the 1s
            # entries will be also nullified. In this case, we store the original
            # categorical encoding which will be later used for inverting the OHE
            if math.isclose(self.median_std_[class_sample], 0):
                # This variable will be used when generating data
                self._X_categorical_minority_encoded = X_class[
                    :, self.continuous_features_.size :
                ].toarray()
            # we can replace the 1 entries of the categorical features with the
            # median of the standard deviation. It will ensure that whenever
            # distance is computed between 2 samples, the difference will be equal
            # to the median of the standard deviation as in the original paper.
            X_class_categorical = X_class[:, self.continuous_features_.size :]
            # With one-hot encoding, the median will be repeated twice. We need
            # to divide by sqrt(2) such that we only have one median value
            # contributing to the Euclidean distance
            X_class_categorical.data[:] = self.median_std_[class_sample] / np.sqrt(2)
            X_class[:, self.continuous_features_.size :] = X_class_categorical
            self.nn_k_.fit(X_class)
            # Drop each sample's first neighbour, which is the sample itself.
            nns = self.nn_k_.kneighbors(X_class, return_distance=False)[:, 1:]
            X_new, y_new = self._make_samples(
                X_class, y.dtype, class_sample, X_class, nns, n_samples, 1.0
            )
            X_resampled.append(X_new)
            y_resampled.append(y_new)
        X_resampled = sparse.vstack(X_resampled, format=X_encoded.format)
        y_resampled = np.hstack(y_resampled)
        # SMOTE resampling ends here
        # reverse the encoding of the categorical features
        X_res_cat = X_resampled[:, self.continuous_features_.size :]
        # Restore proper one-hot 1s (the data currently holds median-std values).
        X_res_cat.data = np.ones_like(X_res_cat.data)
        X_res_cat_dec = self.categorical_encoder_.inverse_transform(X_res_cat)
        if sparse.issparse(X):
            X_resampled = sparse.hstack(
                (
                    X_resampled[:, : self.continuous_features_.size],
                    X_res_cat_dec,
                ),
                format="csr",
            )
        else:
            X_resampled = np.hstack(
                (
                    X_resampled[:, : self.continuous_features_.size].toarray(),
                    X_res_cat_dec,
                )
            )
        # Put the columns back in their original order (continuous features
        # were moved to the front while encoding).
        indices_reordered = np.argsort(
            np.hstack((self.continuous_features_, self.categorical_features_))
        )
        if sparse.issparse(X_resampled):
            # the matrix is supposed to be in the CSR format after the stacking
            col_indices = X_resampled.indices.copy()
            for idx, col_idx in enumerate(indices_reordered):
                mask = X_resampled.indices == col_idx
                col_indices[mask] = idx
            X_resampled.indices = col_indices
        else:
            X_resampled = X_resampled[:, indices_reordered]
        return X_resampled, y_resampled
    def _generate_samples(self, X, nn_data, nn_num, rows, cols, steps, y_type, y=None):
        """Generate a synthetic sample with an additional steps for the
        categorical features.
        Each new sample is generated the same way than in SMOTE. However, the
        categorical features are mapped to the most frequent nearest neighbors
        of the majority class.
        """
        rng = check_random_state(self.random_state)
        X_new = super()._generate_samples(X, nn_data, nn_num, rows, cols, steps)
        # change in sparsity structure more efficient with LIL than CSR
        X_new = X_new.tolil() if sparse.issparse(X_new) else X_new
        # convert to dense array since scipy.sparse doesn't handle 3D
        nn_data = nn_data.toarray() if sparse.issparse(nn_data) else nn_data
        # In the case that the median std was equal to zeros, we have to
        # create non-null entry based on the encoded of OHE
        if math.isclose(self.median_std_[y_type], 0):
            nn_data[
                :, self.continuous_features_.size :
            ] = self._X_categorical_minority_encoded
        all_neighbors = nn_data[nn_num[rows]]
        # Column boundaries of each one-hot group: continuous block first,
        # then one group per categorical feature.
        categories_size = [self.continuous_features_.size] + [
            cat.size for cat in self.categorical_encoder_.categories_
        ]
        for start_idx, end_idx in zip(
            np.cumsum(categories_size)[:-1], np.cumsum(categories_size)[1:]
        ):
            col_maxs = all_neighbors[:, :, start_idx:end_idx].sum(axis=1)
            # tie breaking argmax
            is_max = np.isclose(col_maxs, col_maxs.max(axis=1, keepdims=True))
            max_idxs = rng.permutation(np.argwhere(is_max))
            xs, idx_sels = np.unique(max_idxs[:, 0], return_index=True)
            col_sels = max_idxs[idx_sels, 1]
            ys = start_idx + col_sels
            # Reset the whole one-hot group and set the (tie-broken) most
            # frequent category among the neighbours.
            X_new[:, start_idx:end_idx] = 0
            X_new[xs, ys] = 1
        return X_new
    @property
    def ohe_(self):
        """One-hot encoder used to encode the categorical features."""
        warnings.warn(
            "'ohe_' attribute has been deprecated in 0.11 and will be removed "
            "in 0.13. Use 'categorical_encoder_' instead.",
            FutureWarning,
        )
        return self.categorical_encoder_
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SMOTEN(SMOTE):
    """Synthetic Minority Over-sampling Technique for Nominal.
    This method is referred as SMOTEN in [1]_. It expects that the data to
    resample are only made of categorical features.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    .. versionadded:: 0.8
    Parameters
    ----------
    categorical_encoder : estimator, default=None
        Ordinal encoder used to encode the categorical features. If `None`, a
        :class:`~sklearn.preprocessing.OrdinalEncoder` is used with default parameters.
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.
    Attributes
    ----------
    categorical_encoder_ : estimator
        The encoder used to encode the categorical features.
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    nn_k_ : estimator object
        Validated k-nearest neighbours created from the `k_neighbors` parameter.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    BorderlineSMOTE : Over-sample using the borderline-SMOTE variant.
    SVMSMOTE : Over-sample using the SVM-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original papers: [1]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array(["A"] * 10 + ["B"] * 20 + ["C"] * 30, dtype=object).reshape(-1, 1)
    >>> y = np.array([0] * 20 + [1] * 40, dtype=np.int32)
    >>> from collections import Counter
    >>> print(f"Original class counts: {{Counter(y)}}")
    Original class counts: Counter({{1: 40, 0: 20}})
    >>> from imblearn.over_sampling import SMOTEN
    >>> sampler = SMOTEN(random_state=0)
    >>> X_res, y_res = sampler.fit_resample(X, y)
    >>> print(f"Class counts after resampling {{Counter(y_res)}}")
    Class counts after resampling Counter({{0: 40, 1: 40}})
    """
    _parameter_constraints: dict = {
        **SMOTE._parameter_constraints,
        "categorical_encoder": [
            HasMethods(["fit_transform", "inverse_transform"]),
            None,
        ],
    }
    def __init__(
        self,
        categorical_encoder=None,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        self.categorical_encoder = categorical_encoder
    def _check_X_y(self, X, y):
        """Check should accept strings and not sparse matrices."""
        y, binarize_y = check_target_type(y, indicate_one_vs_all=True)
        # `dtype=None` keeps string/object categories as-is.
        X, y = self._validate_data(
            X,
            y,
            reset=True,
            dtype=None,
            accept_sparse=["csr", "csc"],
        )
        return X, y, binarize_y
    def _validate_estimator(self):
        """Force to use precomputed distance matrix."""
        super()._validate_estimator()
        # Distances come from the Value Difference Metric computed in
        # `_fit_resample`, so the k-NN must consume a precomputed matrix.
        self.nn_k_.set_params(metric="precomputed")
    def _make_samples(self, X_class, klass, y_dtype, nn_indices, n_samples):
        # Generate synthetic categorical samples: each new sample takes, per
        # feature, the most common category among a drawn sample's neighbours.
        random_state = check_random_state(self.random_state)
        # generate sample indices that will be used to generate new samples
        samples_indices = random_state.choice(
            np.arange(X_class.shape[0]), size=n_samples, replace=True
        )
        # for each drawn samples, select its k-neighbors and generate a sample
        # where for each feature individually, each category generated is the
        # most common category
        X_new = np.squeeze(
            _mode(X_class[nn_indices[samples_indices]], axis=1).mode, axis=1
        )
        y_new = np.full(n_samples, fill_value=klass, dtype=y_dtype)
        return X_new, y_new
    def _fit_resample(self, X, y):
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        if sparse.issparse(X):
            # Remember the sparse format so the output can be converted back.
            X_sparse_format = X.format
            X = X.toarray()
            warnings.warn(
                "Passing a sparse matrix to SMOTEN is not really efficient since it is"
                " converted to a dense array internally.",
                DataConversionWarning,
            )
        else:
            X_sparse_format = None
        self._validate_estimator()
        X_resampled = [X.copy()]
        y_resampled = [y.copy()]
        if self.categorical_encoder is None:
            self.categorical_encoder_ = OrdinalEncoder(dtype=np.int32)
        else:
            self.categorical_encoder_ = clone(self.categorical_encoder)
        X_encoded = self.categorical_encoder_.fit_transform(X)
        # Fit the Value Difference Metric on the ordinal-encoded data; it
        # provides the pairwise categorical distances used by the k-NN below.
        vdm = ValueDifferenceMetric(
            n_categories=[len(cat) for cat in self.categorical_encoder_.categories_]
        ).fit(X_encoded, y)
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X_encoded, target_class_indices)
            X_class_dist = vdm.pairwise(X_class)
            self.nn_k_.fit(X_class_dist)
            # the kneigbors search will include the sample itself which is
            # expected from the original algorithm
            nn_indices = self.nn_k_.kneighbors(X_class_dist, return_distance=False)
            X_new, y_new = self._make_samples(
                X_class, class_sample, y.dtype, nn_indices, n_samples
            )
            # Map the ordinal codes back to the original category labels.
            X_new = self.categorical_encoder_.inverse_transform(X_new)
            X_resampled.append(X_new)
            y_resampled.append(y_new)
        X_resampled = np.vstack(X_resampled)
        y_resampled = np.hstack(y_resampled)
        if X_sparse_format == "csr":
            return sparse.csr_matrix(X_resampled), y_resampled
        elif X_sparse_format == "csc":
            return sparse.csc_matrix(X_resampled), y_resampled
        else:
            return X_resampled, y_resampled
    def _more_tags(self):
        return {"X_types": ["2darray", "dataframe", "string"]}
| 39,149 | 36.321258 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/filter.py | """SMOTE variant applying some filtering before the generation process."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# Dzianis Dudnik
# License: MIT
import numbers
import warnings
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.svm import SVC
from sklearn.utils import _safe_indexing, check_random_state
from ...utils import Substitution, check_neighbors_object
from ...utils._docstring import _n_jobs_docstring, _random_state_docstring
from ...utils._param_validation import HasMethods, Interval, StrOptions
from ..base import BaseOverSampler
from .base import BaseSMOTE
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class BorderlineSMOTE(BaseSMOTE):
    """Over-sampling using Borderline SMOTE.
    This algorithm is a variant of the original SMOTE algorithm proposed in
    [2]_. Borderline samples will be detected and used to generate new
    synthetic samples.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    .. versionadded:: 0.4
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.
    m_neighbors : int or object, default=10
        The nearest neighbors used to determine if a minority sample is in
        "danger". You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    kind : {{"borderline-1", "borderline-2"}}, default='borderline-1'
        The type of SMOTE algorithm to use one of the following options:
        ``'borderline-1'``, ``'borderline-2'``.
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    nn_k_ : estimator object
        Validated k-nearest neighbours created from the `k_neighbors` parameter.
    nn_m_ : estimator object
        Validated m-nearest neighbours created from the `m_neighbors` parameter.
    in_danger_indices : dict of ndarray
        Dictionary containing the indices of the samples considered in danger that
        are used to generate new synthetic samples. The keys corresponds to the class
        label.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original papers: [2]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    .. [2] H. Han, W. Wen-Yuan, M. Bing-Huan, "Borderline-SMOTE: a new
       over-sampling method in imbalanced data sets learning," Advances in
       intelligent computing, 878-887, 2005.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import BorderlineSMOTE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sm = BorderlineSMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    _parameter_constraints: dict = {
        **BaseSMOTE._parameter_constraints,
        "m_neighbors": [
            Interval(numbers.Integral, 1, None, closed="left"),
            HasMethods(["kneighbors", "kneighbors_graph"]),
        ],
        "kind": [StrOptions({"borderline-1", "borderline-2"})],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
        m_neighbors=10,
        kind="borderline-1",
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        self.m_neighbors = m_neighbors
        self.kind = kind
    def _validate_estimator(self):
        """Validate the base estimators plus the m-NN model used to flag samples."""
        super()._validate_estimator()
        # NOTE(review): additional_neighbor=1 presumably compensates for
        # `kneighbors` returning the query sample itself -- confirm against
        # `check_neighbors_object`.
        self.nn_m_ = check_neighbors_object(
            "m_neighbors", self.m_neighbors, additional_neighbor=1
        )
    def _fit_resample(self, X, y):
        """Resample ``X``/``y``, generating synthetic samples only from the
        minority samples detected as lying on the class borderline."""
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self._validate_estimator()
        X_resampled = X.copy()
        y_resampled = y.copy()
        self.in_danger_indices = {}
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            # flag samples of the current class considered "in danger" by the
            # m-NN rule (see BaseSMOTE._in_danger_noise)
            self.nn_m_.fit(X)
            mask_danger = self._in_danger_noise(
                self.nn_m_, X_class, class_sample, y, kind="danger"
            )
            if not any(mask_danger):
                continue
            X_danger = _safe_indexing(X_class, mask_danger)
            self.in_danger_indices[class_sample] = target_class_indices[mask_danger]
            if self.kind == "borderline-1":
                X_to_sample_from = X_class  # consider the positive class only
                y_to_check_neighbors = None
            else:  # self.kind == "borderline-2"
                X_to_sample_from = X  # consider the whole dataset
                y_to_check_neighbors = y
            self.nn_k_.fit(X_to_sample_from)
            # drop the first neighbor: the danger samples belong to the fitted
            # data, so the closest neighbor is the sample itself
            nns = self.nn_k_.kneighbors(X_danger, return_distance=False)[:, 1:]
            X_new, y_new = self._make_samples(
                X_danger,
                y.dtype,
                class_sample,
                X_to_sample_from,
                nns,
                n_samples,
                y=y_to_check_neighbors,
            )
            if sparse.issparse(X_new):
                X_resampled = sparse.vstack([X_resampled, X_new])
            else:
                X_resampled = np.vstack((X_resampled, X_new))
            y_resampled = np.hstack((y_resampled, y_new))
        return X_resampled, y_resampled
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class SVMSMOTE(BaseSMOTE):
    """Over-sampling using SVM-SMOTE.
    Variant of SMOTE algorithm which use an SVM algorithm to detect sample to
    use for generating new synthetic samples as proposed in [2]_.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    .. versionadded:: 0.4
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=5
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
        .. deprecated:: 0.10
           `n_jobs` has been deprecated in 0.10 and will be removed in 0.12.
           It was previously used to set `n_jobs` of nearest neighbors
           algorithm. From now on, you can pass an estimator where `n_jobs` is
           already set instead.
    m_neighbors : int or object, default=10
        The nearest neighbors used to determine if a minority sample is in
        "danger". You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    svm_estimator : estimator object, default=SVC()
        A parametrized :class:`~sklearn.svm.SVC` classifier can be passed.
        A scikit-learn compatible estimator can be passed but it is required
        to expose a `support_` fitted attribute.
    out_step : float, default=0.5
        Step size when extrapolating.
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    nn_k_ : estimator object
        Validated k-nearest neighbours created from the `k_neighbors` parameter.
    nn_m_ : estimator object
        Validated m-nearest neighbours created from the `m_neighbors` parameter.
    svm_estimator_ : estimator object
        The validated SVM classifier used to detect samples from which to
        generate new synthetic samples.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE.
    ADASYN : Over-sample using ADASYN.
    KMeansSMOTE : Over-sample applying a clustering before to oversample using
        SMOTE.
    Notes
    -----
    See the original papers: [2]_ for more details.
    Supports multi-class resampling. A one-vs.-rest scheme is used as
    originally proposed in [1]_.
    References
    ----------
    .. [1] N. V. Chawla, K. W. Bowyer, L. O.Hall, W. P. Kegelmeyer, "SMOTE:
       synthetic minority over-sampling technique," Journal of artificial
       intelligence research, 321-357, 2002.
    .. [2] H. M. Nguyen, E. W. Cooper, K. Kamei, "Borderline over-sampling for
       imbalanced data classification," International Journal of Knowledge
       Engineering and Soft Data Paradigms, 3(1), pp.4-21, 2009.
    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import make_classification
    >>> from imblearn.over_sampling import SVMSMOTE
    >>> X, y = make_classification(n_classes=2, class_sep=2,
    ... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
    ... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
    >>> print('Original dataset shape %s' % Counter(y))
    Original dataset shape Counter({{1: 900, 0: 100}})
    >>> sm = SVMSMOTE(random_state=42)
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> print('Resampled dataset shape %s' % Counter(y_res))
    Resampled dataset shape Counter({{0: 900, 1: 900}})
    """
    _parameter_constraints: dict = {
        **BaseSMOTE._parameter_constraints,
        "m_neighbors": [
            Interval(numbers.Integral, 1, None, closed="left"),
            HasMethods(["kneighbors", "kneighbors_graph"]),
        ],
        "svm_estimator": [HasMethods(["fit", "predict"]), None],
        "out_step": [Interval(numbers.Real, 0, 1, closed="both")],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=5,
        n_jobs=None,
        m_neighbors=10,
        svm_estimator=None,
        out_step=0.5,
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        self.m_neighbors = m_neighbors
        self.svm_estimator = svm_estimator
        self.out_step = out_step
    def _validate_estimator(self):
        """Validate the base estimators, the m-NN model and the SVM classifier."""
        super()._validate_estimator()
        self.nn_m_ = check_neighbors_object(
            "m_neighbors", self.m_neighbors, additional_neighbor=1
        )
        # default to an SVC; a user-provided estimator is cloned so that the
        # original instance passed as parameter is never mutated
        if self.svm_estimator is None:
            self.svm_estimator_ = SVC(gamma="scale", random_state=self.random_state)
        else:
            self.svm_estimator_ = clone(self.svm_estimator)
    def _fit_resample(self, X, y):
        """Resample by generating synthetic samples around the SVM support
        vectors of each class to over-sample."""
        # FIXME: to be removed in 0.12
        if self.n_jobs is not None:
            warnings.warn(
                "The parameter `n_jobs` has been deprecated in 0.10 and will be "
                "removed in 0.12. You can pass an nearest neighbors estimator where "
                "`n_jobs` is already set instead.",
                FutureWarning,
            )
        self._validate_estimator()
        random_state = check_random_state(self.random_state)
        X_resampled = X.copy()
        y_resampled = y.copy()
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            self.svm_estimator_.fit(X, y)
            if not hasattr(self.svm_estimator_, "support_"):
                raise RuntimeError(
                    "`svm_estimator` is required to exposed a `support_` fitted "
                    "attribute. Such estimator belongs to the familly of Support "
                    "Vector Machine."
                )
            # keep only the support vectors that belong to the class to resample
            support_index = self.svm_estimator_.support_[
                y[self.svm_estimator_.support_] == class_sample
            ]
            support_vector = _safe_indexing(X, support_index)
            self.nn_m_.fit(X)
            # discard support vectors flagged as noise by the m-NN rule
            noise_bool = self._in_danger_noise(
                self.nn_m_, support_vector, class_sample, y, kind="noise"
            )
            support_vector = _safe_indexing(
                support_vector, np.flatnonzero(np.logical_not(noise_bool))
            )
            if support_vector.shape[0] == 0:
                raise ValueError(
                    "All support vectors are considered as noise. SVM-SMOTE is not "
                    "adapted to your dataset. Try another SMOTE variant."
                )
            # split the remaining support vectors into "danger" (near the class
            # boundary) and "safe" ones
            danger_bool = self._in_danger_noise(
                self.nn_m_, support_vector, class_sample, y, kind="danger"
            )
            safety_bool = np.logical_not(danger_bool)
            self.nn_k_.fit(X_class)
            # random split (Beta(10, 10), concentrated around 0.5) between the
            # interpolated and extrapolated synthetic samples
            fractions = random_state.beta(10, 10)
            n_generated_samples = int(fractions * (n_samples + 1))
            if np.count_nonzero(danger_bool) > 0:
                nns = self.nn_k_.kneighbors(
                    _safe_indexing(support_vector, np.flatnonzero(danger_bool)),
                    return_distance=False,
                )[:, 1:]
                # interpolate between danger support vectors and their neighbors
                X_new_1, y_new_1 = self._make_samples(
                    _safe_indexing(support_vector, np.flatnonzero(danger_bool)),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_generated_samples,
                    step_size=1.0,
                )
            if np.count_nonzero(safety_bool) > 0:
                nns = self.nn_k_.kneighbors(
                    _safe_indexing(support_vector, np.flatnonzero(safety_bool)),
                    return_distance=False,
                )[:, 1:]
                # negative step: extrapolate away from the neighbors (`out_step`
                # is documented as the extrapolation step size)
                X_new_2, y_new_2 = self._make_samples(
                    _safe_indexing(support_vector, np.flatnonzero(safety_bool)),
                    y.dtype,
                    class_sample,
                    X_class,
                    nns,
                    n_samples - n_generated_samples,
                    step_size=-self.out_step,
                )
            # stack whichever batches were actually generated onto the output
            if np.count_nonzero(danger_bool) > 0 and np.count_nonzero(safety_bool) > 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_1, X_new_2])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1, X_new_2))
                y_resampled = np.concatenate((y_resampled, y_new_1, y_new_2), axis=0)
            elif np.count_nonzero(danger_bool) == 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_2])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_2))
                y_resampled = np.concatenate((y_resampled, y_new_2), axis=0)
            elif np.count_nonzero(safety_bool) == 0:
                if sparse.issparse(X_resampled):
                    X_resampled = sparse.vstack([X_resampled, X_new_1])
                else:
                    X_resampled = np.vstack((X_resampled, X_new_1))
                y_resampled = np.concatenate((y_resampled, y_new_1), axis=0)
        return X_resampled, y_resampled
| 19,936 | 35.988868 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/cluster.py | """SMOTE variant employing some clustering before the generation."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Fernando Nogueira
# Christos Aridas
# License: MIT
import math
import numbers
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.cluster import MiniBatchKMeans
from sklearn.metrics import pairwise_distances
from sklearn.utils import _safe_indexing
from ...utils import Substitution
from ...utils._docstring import _n_jobs_docstring, _random_state_docstring
from ...utils._param_validation import HasMethods, Interval, StrOptions
from ..base import BaseOverSampler
from .base import BaseSMOTE
@Substitution(
    sampling_strategy=BaseOverSampler._sampling_strategy_docstring,
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class KMeansSMOTE(BaseSMOTE):
    """Apply a KMeans clustering before to over-sample using SMOTE.
    This is an implementation of the algorithm described in [1]_.
    Read more in the :ref:`User Guide <smote_adasyn>`.
    .. versionadded:: 0.5
    Parameters
    ----------
    {sampling_strategy}
    {random_state}
    k_neighbors : int or object, default=2
        The nearest neighbors used to define the neighborhood of samples to use
        to generate the synthetic samples. You can pass:
        - an `int` corresponding to the number of neighbors to use. A
          `~sklearn.neighbors.NearestNeighbors` instance will be fitted in this
          case.
        - an instance of a compatible nearest neighbors algorithm that should
          implement both methods `kneighbors` and `kneighbors_graph`. For
          instance, it could correspond to a
          :class:`~sklearn.neighbors.NearestNeighbors` but could be extended to
          any compatible class.
    {n_jobs}
    kmeans_estimator : int or object, default=None
        A KMeans instance or the number of clusters to be used. By default,
        we used a :class:`~sklearn.cluster.MiniBatchKMeans` which tend to be
        better with large number of samples.
    cluster_balance_threshold : "auto" or float, default="auto"
        The threshold at which a cluster is called balanced and where samples
        of the class selected for SMOTE will be oversampled. If "auto", this
        will be determined by the ratio for each class, or it can be set
        manually.
    density_exponent : "auto" or float, default="auto"
        This exponent is used to determine the density of a cluster. Leaving
        this to "auto" will use a feature-length based exponent.
    Attributes
    ----------
    sampling_strategy_ : dict
        Dictionary containing the information to sample the dataset. The keys
        corresponds to the class labels from which to sample and the values
        are the number of samples to sample.
    kmeans_estimator_ : estimator
        The fitted clustering method used before to apply SMOTE.
    nn_k_ : estimator
        The fitted k-NN estimator used in SMOTE.
    cluster_balance_threshold_ : float
        The threshold used during ``fit`` for calling a cluster balanced.
    n_features_in_ : int
        Number of features in the input dataset.
        .. versionadded:: 0.9
    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.
        .. versionadded:: 0.10
    See Also
    --------
    SMOTE : Over-sample using SMOTE.
    SMOTENC : Over-sample using SMOTE for continuous and categorical features.
    SMOTEN : Over-sample using the SMOTE variant specifically for categorical
        features only.
    SVMSMOTE : Over-sample using SVM-SMOTE variant.
    BorderlineSMOTE : Over-sample using Borderline-SMOTE variant.
    ADASYN : Over-sample using ADASYN.
    References
    ----------
    .. [1] Felix Last, Georgios Douzas, Fernando Bacao, "Oversampling for
       Imbalanced Learning Based on K-Means and SMOTE"
       https://arxiv.org/abs/1711.00837
    Examples
    --------
    >>> import numpy as np
    >>> from imblearn.over_sampling import KMeansSMOTE
    >>> from sklearn.datasets import make_blobs
    >>> blobs = [100, 800, 100]
    >>> X, y = make_blobs(blobs, centers=[(-10, 0), (0,0), (10, 0)])
    >>> # Add a single 0 sample in the middle blob
    >>> X = np.concatenate([X, [[0, 0]]])
    >>> y = np.append(y, 0)
    >>> # Make this a binary classification problem
    >>> y = y == 1
    >>> sm = KMeansSMOTE(
    ...     kmeans_estimator=MiniBatchKMeans(n_init=1, random_state=0), random_state=42
    ... )
    >>> X_res, y_res = sm.fit_resample(X, y)
    >>> # Find the number of new samples in the middle blob
    >>> n_res_in_middle = ((X_res[:, 0] > -5) & (X_res[:, 0] < 5)).sum()
    >>> print("Samples in the middle blob: %s" % n_res_in_middle)
    Samples in the middle blob: 801
    >>> print("Middle blob unchanged: %s" % (n_res_in_middle == blobs[1] + 1))
    Middle blob unchanged: True
    >>> print("More 0 samples: %s" % ((y_res == 0).sum() > (y == 0).sum()))
    More 0 samples: True
    """
    _parameter_constraints: dict = {
        **BaseSMOTE._parameter_constraints,
        "kmeans_estimator": [
            HasMethods(["fit", "predict"]),
            Interval(numbers.Integral, 1, None, closed="left"),
            None,
        ],
        "cluster_balance_threshold": [StrOptions({"auto"}), numbers.Real],
        "density_exponent": [StrOptions({"auto"}), numbers.Real],
    }
    def __init__(
        self,
        *,
        sampling_strategy="auto",
        random_state=None,
        k_neighbors=2,
        n_jobs=None,
        kmeans_estimator=None,
        cluster_balance_threshold="auto",
        density_exponent="auto",
    ):
        super().__init__(
            sampling_strategy=sampling_strategy,
            random_state=random_state,
            k_neighbors=k_neighbors,
            n_jobs=n_jobs,
        )
        self.kmeans_estimator = kmeans_estimator
        self.cluster_balance_threshold = cluster_balance_threshold
        self.density_exponent = density_exponent
    def _validate_estimator(self):
        """Validate the base estimators and build the clustering estimator."""
        super()._validate_estimator()
        # `kmeans_estimator` accepts None (default MiniBatchKMeans), an int
        # (number of clusters) or a pre-parametrized estimator (cloned)
        if self.kmeans_estimator is None:
            self.kmeans_estimator_ = MiniBatchKMeans(random_state=self.random_state)
        elif isinstance(self.kmeans_estimator, int):
            self.kmeans_estimator_ = MiniBatchKMeans(
                n_clusters=self.kmeans_estimator,
                random_state=self.random_state,
            )
        else:
            self.kmeans_estimator_ = clone(self.kmeans_estimator)
        # with a single cluster, every sample falls in it: accept any balance
        self.cluster_balance_threshold_ = (
            self.cluster_balance_threshold
            if self.kmeans_estimator_.n_clusters != 1
            else -np.inf
        )
    def _find_cluster_sparsity(self, X):
        """Compute the cluster sparsity."""
        euclidean_distances = pairwise_distances(
            X, metric="euclidean", n_jobs=self.n_jobs
        )
        # zero out the diagonal so self-distances do not bias the mean
        for ind in range(X.shape[0]):
            euclidean_distances[ind, ind] = 0
        non_diag_elements = (X.shape[0] ** 2) - X.shape[0]
        mean_distance = euclidean_distances.sum() / non_diag_elements
        exponent = (
            math.log(X.shape[0], 1.6) ** 1.8 * 0.16
            if self.density_exponent == "auto"
            else self.density_exponent
        )
        return (mean_distance**exponent) / X.shape[0]
    def _fit_resample(self, X, y):
        """Cluster ``X`` and apply SMOTE within the sufficiently imbalanced
        clusters, weighting the generation by cluster sparsity."""
        self._validate_estimator()
        X_resampled = X.copy()
        y_resampled = y.copy()
        total_inp_samples = sum(self.sampling_strategy_.values())
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            X_clusters = self.kmeans_estimator_.fit_predict(X)
            valid_clusters = []
            cluster_sparsities = []
            # identify the clusters that satisfy the balance and size requirements
            for cluster_idx in range(self.kmeans_estimator_.n_clusters):
                cluster_mask = np.flatnonzero(X_clusters == cluster_idx)
                if cluster_mask.size == 0:
                    # empty cluster
                    continue
                X_cluster = _safe_indexing(X, cluster_mask)
                y_cluster = _safe_indexing(y, cluster_mask)
                cluster_class_mean = (y_cluster == class_sample).mean()
                if self.cluster_balance_threshold_ == "auto":
                    balance_threshold = n_samples / total_inp_samples / 2
                else:
                    balance_threshold = self.cluster_balance_threshold_
                # the cluster is already considered balanced
                if cluster_class_mean < balance_threshold:
                    continue
                # not enough samples to apply SMOTE
                anticipated_samples = cluster_class_mean * X_cluster.shape[0]
                if anticipated_samples < self.nn_k_.n_neighbors:
                    continue
                X_cluster_class = _safe_indexing(
                    X_cluster, np.flatnonzero(y_cluster == class_sample)
                )
                valid_clusters.append(cluster_mask)
                cluster_sparsities.append(self._find_cluster_sparsity(X_cluster_class))
            cluster_sparsities = np.array(cluster_sparsities)
            # NOTE(review): when no cluster is valid this divides an empty
            # array by 0.0 before the explicit error below is raised --
            # harmless result-wise, but the check could come first.
            cluster_weights = cluster_sparsities / cluster_sparsities.sum()
            if not valid_clusters:
                raise RuntimeError(
                    f"No clusters found with sufficient samples of "
                    f"class {class_sample}. Try lowering the "
                    f"cluster_balance_threshold or increasing the number of "
                    f"clusters."
                )
            for valid_cluster_idx, valid_cluster in enumerate(valid_clusters):
                X_cluster = _safe_indexing(X, valid_cluster)
                y_cluster = _safe_indexing(y, valid_cluster)
                X_cluster_class = _safe_indexing(
                    X_cluster, np.flatnonzero(y_cluster == class_sample)
                )
                self.nn_k_.fit(X_cluster_class)
                # the first neighbor is the sample itself since the query set
                # equals the fitted set; it is dropped with `[:, 1:]`
                nns = self.nn_k_.kneighbors(X_cluster_class, return_distance=False)[
                    :, 1:
                ]
                # share of the requested samples allocated to this cluster,
                # proportionally to its sparsity
                cluster_n_samples = int(
                    math.ceil(n_samples * cluster_weights[valid_cluster_idx])
                )
                X_new, y_new = self._make_samples(
                    X_cluster_class,
                    y.dtype,
                    class_sample,
                    X_cluster_class,
                    nns,
                    cluster_n_samples,
                    1.0,
                )
                stack = [np.vstack, sparse.vstack][int(sparse.issparse(X_new))]
                X_resampled = stack((X_resampled, X_new))
                y_resampled = np.hstack((y_resampled, y_new))
        return X_resampled, y_resampled
| 11,028 | 34.808442 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/__init__.py | from .base import SMOTE, SMOTEN, SMOTENC
from .cluster import KMeansSMOTE
from .filter import SVMSMOTE, BorderlineSMOTE
# Public names re-exported by the SMOTE sub-package.
__all__ = [
    "SMOTE",
    "SMOTEN",
    "SMOTENC",
    "KMeansSMOTE",
    "BorderlineSMOTE",
    "SVMSMOTE",
]
| 235 | 17.153846 | 45 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/test_smote_nc.py | """Test the module SMOTENC."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# Dzianis Dudnik
# License: MIT
from collections import Counter
import numpy as np
import pytest
import sklearn
from scipy import sparse
from sklearn.datasets import make_classification
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import parse_version
from imblearn.over_sampling import SMOTENC
from imblearn.utils.estimator_checks import (
_set_checking_parameters,
check_param_validation,
)
sklearn_version = parse_version(sklearn.__version__)
def data_heterogneous_ordered():
    """Build a 30x4 object array whose two leading columns are continuous and
    whose two trailing columns (2 and 3) are categorical."""
    rand_gen = np.random.RandomState(42)
    X = np.empty((30, 4), dtype=object)
    # two continuous features in the leading columns
    X[:, :2] = rand_gen.randn(30, 2)
    # a string-valued categorical column
    X[:, 2] = rand_gen.choice(["a", "b", "c"], size=30).astype(object)
    # an integer-valued categorical column
    X[:, 3] = rand_gen.randint(3, size=30)
    # 10 minority samples followed by 20 majority samples
    y = np.array([0] * 10 + [1] * 20)
    categorical_columns = [2, 3]
    return X, y, categorical_columns
def data_heterogneous_unordered():
    """Build a 30x4 object array with the categorical columns (0 and 3)
    surrounding the two continuous columns (1 and 2)."""
    rand_gen = np.random.RandomState(42)
    X = np.empty((30, 4), dtype=object)
    # two continuous features in the middle columns
    X[:, [1, 2]] = rand_gen.randn(30, 2)
    # a string-valued categorical column
    X[:, 0] = rand_gen.choice(["a", "b", "c"], size=30).astype(object)
    # an integer-valued categorical column
    X[:, 3] = rand_gen.randint(3, size=30)
    # 10 minority samples followed by 20 majority samples
    y = np.array([0] * 10 + [1] * 20)
    categorical_columns = [0, 3]
    return X, y, categorical_columns
def data_heterogneous_masked():
    """Same layout as `data_heterogneous_unordered` but the categorical
    columns are reported as a boolean mask instead of indices."""
    rand_gen = np.random.RandomState(42)
    X = np.empty((30, 4), dtype=object)
    # two continuous features in the middle columns
    X[:, [1, 2]] = rand_gen.randn(30, 2)
    # a string-valued categorical column
    X[:, 0] = rand_gen.choice(["a", "b", "c"], size=30).astype(object)
    # an integer-valued categorical column
    X[:, 3] = rand_gen.randint(3, size=30)
    # 10 minority samples followed by 20 majority samples
    y = np.array([0] * 10 + [1] * 20)
    categorical_mask = [True, False, False, True]
    return X, y, categorical_mask
def data_heterogneous_unordered_multiclass():
    """Build a 50x4 heterogeneous object array with a three-class target
    (10/15/25 samples); categorical columns are 0 and 3."""
    rand_gen = np.random.RandomState(42)
    X = np.empty((50, 4), dtype=object)
    # two continuous features in the middle columns
    X[:, [1, 2]] = rand_gen.randn(50, 2)
    # a string-valued categorical column
    X[:, 0] = rand_gen.choice(["a", "b", "c"], size=50).astype(object)
    # an integer-valued categorical column
    X[:, 3] = rand_gen.randint(3, size=50)
    # imbalanced three-class target
    y = np.array([0] * 10 + [1] * 15 + [2] * 25)
    categorical_columns = [0, 3]
    return X, y, categorical_columns
def data_sparse(format):
    """Build a sparse 30x4 float matrix (``"csr"`` or ``"csc"`` per `format`)
    with integer-coded categorical columns 0 and 3."""
    rand_gen = np.random.RandomState(42)
    X = np.empty((30, 4), dtype=np.float64)
    # two continuous features in the middle columns
    X[:, [1, 2]] = rand_gen.randn(30, 2)
    # two integer-coded categorical columns (stored as floats)
    X[:, 0] = rand_gen.randint(3, size=30)
    X[:, 3] = rand_gen.randint(3, size=30)
    # 10 minority samples followed by 20 majority samples
    y = np.array([0] * 10 + [1] * 20)
    if format == "csr":
        X = sparse.csr_matrix(X)
    else:
        X = sparse.csc_matrix(X)
    return X, y, [0, 3]
def test_smotenc_error():
    """A categorical feature index outside the feature range must raise."""
    X, y, _ = data_heterogneous_unordered()
    # index 10 does not exist in a 4-feature matrix
    categorical_features = [0, 10]
    smote = SMOTENC(random_state=0, categorical_features=categorical_features)
    with pytest.raises(ValueError, match="all features must be in"):
        smote.fit_resample(X, y)
@pytest.mark.parametrize(
    "data",
    [
        data_heterogneous_ordered(),
        data_heterogneous_unordered(),
        data_heterogneous_masked(),
        data_sparse("csr"),
        data_sparse("csc"),
    ],
)
def test_smotenc(data):
    """Resampling preserves the dtype and does not create new categories."""
    X, y, categorical_features = data
    smote = SMOTENC(random_state=0, categorical_features=categorical_features)
    X_resampled, y_resampled = smote.fit_resample(X, y)
    assert X_resampled.dtype == X.dtype
    # normalize a boolean mask into explicit column indices
    categorical_features = np.array(categorical_features)
    if categorical_features.dtype == bool:
        categorical_features = np.flatnonzero(categorical_features)
    for cat_idx in categorical_features:
        if sparse.issparse(X):
            assert set(X[:, cat_idx].data) == set(X_resampled[:, cat_idx].data)
            assert X[:, cat_idx].dtype == X_resampled[:, cat_idx].dtype
        else:
            assert set(X[:, cat_idx]) == set(X_resampled[:, cat_idx])
            assert X[:, cat_idx].dtype == X_resampled[:, cat_idx].dtype
    assert isinstance(smote.median_std_, dict)
# Part of the common checks that apply to SMOTE-NC even though it is not
# default constructible.
def test_smotenc_check_target_type():
    """Continuous and multilabel/multioutput targets must be rejected."""
    X, _, categorical_features = data_heterogneous_unordered()
    # continuous target is not a valid classification label
    y = np.linspace(0, 1, 30)
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    with pytest.raises(ValueError, match="Unknown label type"):
        smote.fit_resample(X, y)
    rng = np.random.RandomState(42)
    # 2-D target: multilabel/multioutput is unsupported
    y = rng.randint(2, size=(20, 3))
    msg = "Multilabel and multioutput targets are not supported."
    with pytest.raises(ValueError, match=msg):
        smote.fit_resample(X, y)
def test_smotenc_samplers_one_label():
    """Fitting on a single-class target must raise a ValueError."""
    X, _, categorical_features = data_heterogneous_unordered()
    y = np.zeros(30)
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    with pytest.raises(ValueError, match="needs to have more than 1 class"):
        smote.fit(X, y)
def test_smotenc_fit():
    """`fit_resample` must store the `sampling_strategy_` fitted attribute."""
    X, y, categorical_features = data_heterogneous_unordered()
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    smote.fit_resample(X, y)
    assert hasattr(
        smote, "sampling_strategy_"
    ), "No fitted attribute sampling_strategy_"
def test_smotenc_fit_resample():
    """After resampling, every class reaches at least the majority count."""
    X, y, categorical_features = data_heterogneous_unordered()
    target_stats = Counter(y)
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    _, y_res = smote.fit_resample(X, y)
    n_samples = max(target_stats.values())
    # the original built a throwaway `Counter(y_res)` that was immediately
    # discarded; the counter is now computed once, inside the assertion
    assert all(value >= n_samples for value in Counter(y_res).values())
def test_smotenc_fit_resample_sampling_strategy():
    """Classes absent from `sampling_strategy` must be left untouched."""
    X, y, categorical_features = data_heterogneous_unordered_multiclass()
    expected_stat = Counter(y)[1]
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    # only classes 0 and 2 are over-sampled; class 1 must keep its count
    sampling_strategy = {2: 25, 0: 25}
    smote.set_params(sampling_strategy=sampling_strategy)
    # the resampled X was assigned to an unused local in the original
    _, y_res = smote.fit_resample(X, y)
    assert Counter(y_res)[1] == expected_stat
def test_smotenc_pandas():
    """DataFrame input must produce the same resampling as ndarray input."""
    pd = pytest.importorskip("pandas")
    # Check that the samplers handle pandas dataframe and pandas series
    X, y, categorical_features = data_heterogneous_unordered_multiclass()
    X_pd = pd.DataFrame(X)
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    X_res_pd, y_res_pd = smote.fit_resample(X_pd, y)
    X_res, y_res = smote.fit_resample(X, y)
    assert_array_equal(X_res_pd.to_numpy(), X_res)
    assert_allclose(y_res_pd, y_res)
    # one entry per over-sampled class
    assert set(smote.median_std_.keys()) == {0, 1}
def test_smotenc_preserve_dtype():
    """Non-default dtypes of X and y must survive resampling."""
    X, y = make_classification(
        n_samples=50,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    # Cast X and y to not default dtype
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    smote = SMOTENC(categorical_features=[1], random_state=0)
    X_res, y_res = smote.fit_resample(X, y)
    assert X.dtype == X_res.dtype, "X dtype is not preserved"
    assert y.dtype == y_res.dtype, "y dtype is not preserved"
@pytest.mark.parametrize("categorical_features", [[True, True, True], [0, 1, 2]])
def test_smotenc_raising_error_all_categorical(categorical_features):
    """Marking every feature categorical must raise (use SMOTEN instead)."""
    X, y = make_classification(
        n_features=3,
        n_informative=1,
        n_redundant=1,
        n_repeated=0,
        n_clusters_per_class=1,
    )
    smote = SMOTENC(categorical_features=categorical_features)
    err_msg = "SMOTE-NC is not designed to work only with categorical features"
    with pytest.raises(ValueError, match=err_msg):
        smote.fit_resample(X, y)
def test_smote_nc_with_null_median_std():
    """SMOTE-NC works when `median_std_` of a class is null (0.0)."""
    # Non-regression test for #662
    # https://github.com/scikit-learn-contrib/imbalanced-learn/issues/662
    # Object dtype mixes 3 numeric-valued columns with a categorical column.
    data = np.array(
        [
            [1, 2, 1, "A"],
            [2, 1, 2, "A"],
            [2, 1, 2, "A"],
            [1, 2, 3, "B"],
            [1, 2, 4, "C"],
            [1, 2, 5, "C"],
            [1, 2, 4, "C"],
            [1, 2, 4, "C"],
            [1, 2, 4, "C"],
        ],
        dtype="object",
    )
    labels = np.array(
        [
            "class_1",
            "class_1",
            "class_1",
            "class_1",
            "class_2",
            "class_2",
            "class_3",
            "class_3",
            "class_3",
        ],
        dtype=object,
    )
    smote = SMOTENC(categorical_features=[3], k_neighbors=1, random_state=0)
    X_res, y_res = smote.fit_resample(data, labels)
    # check that the categorical feature is not random but correspond to the
    # categories seen in the minority class samples
    assert_array_equal(X_res[-3:, -1], np.array(["C", "C", "C"], dtype=object))
    # Both resampled minority classes ended up with a null median std.
    assert smote.median_std_ == {"class_2": 0.0, "class_3": 0.0}
def test_smotenc_categorical_encoder():
    """Check that we can pass our own categorical encoder."""
    # TODO: only use `sparse_output` when sklearn >= 1.2
    param = "sparse" if sklearn_version < parse_version("1.2") else "sparse_output"
    X, y, categorical_features = data_heterogneous_unordered()
    smote = SMOTENC(categorical_features=categorical_features, random_state=0)
    smote.fit_resample(X, y)
    # The default internal encoder is a sparse one-hot encoder.
    assert getattr(smote.categorical_encoder_, param) is True
    encoder = OneHotEncoder(**{param: False})
    smote.set_params(categorical_encoder=encoder).fit_resample(X, y)
    # The user-provided encoder is stored as-is while a distinct (cloned)
    # instance with the same settings is fitted.
    assert smote.categorical_encoder is encoder
    assert smote.categorical_encoder_ is not encoder
    assert getattr(smote.categorical_encoder_, param) is False
# TODO(0.13): remove this test
def test_smotenc_deprecation_ohe_():
    """Accessing the deprecated `ohe_` attribute warns with FutureWarning."""
    X, y, categorical_features = data_heterogneous_unordered()
    sampler = SMOTENC(categorical_features=categorical_features, random_state=0)
    sampler.fit_resample(X, y)
    with pytest.warns(FutureWarning, match="'ohe_' attribute has been deprecated"):
        sampler.ohe_
def test_smotenc_param_validation():
    """Check that we validate the parameters correctly since this estimator requires
    a specific parameter.
    """
    sampler = SMOTENC(categorical_features=[0], random_state=0)
    _set_checking_parameters(sampler)
    check_param_validation(sampler.__class__.__name__, sampler)
def test_smotenc_bool_categorical():
    """Check that we don't try to early convert the full input data to numeric when
    handling a pandas dataframe.

    Non-regression test for:
    https://github.com/scikit-learn-contrib/imbalanced-learn/issues/974
    """
    pd = pytest.importorskip("pandas")
    # Mixed dataframe: categorical, float, and boolean columns.
    X = pd.DataFrame(
        {
            "c": pd.Categorical([x for x in "abbacaba" * 3]),
            "f": [0.3, 0.5, 0.1, 0.2] * 6,
            "b": [False, False, True] * 8,
        }
    )
    y = pd.DataFrame({"out": [1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0] * 2})
    smote = SMOTENC(categorical_features=[0])
    X_res, y_res = smote.fit_resample(X, y)
    # Dtypes (including bool) must survive resampling untouched.
    pd.testing.assert_series_equal(X_res.dtypes, X.dtypes)
    assert len(X_res) == len(y_res)
    # Same check with the boolean column declared categorical as well.
    smote.set_params(categorical_features=[0, 2])
    X_res, y_res = smote.fit_resample(X, y)
    pd.testing.assert_series_equal(X_res.dtypes, X.dtypes)
    assert len(X_res) == len(y_res)
    # And with the boolean column cast to a pandas `category` dtype.
    X = X.astype({"b": "category"})
    X_res, y_res = smote.fit_resample(X, y)
    pd.testing.assert_series_equal(X_res.dtypes, X.dtypes)
    assert len(X_res) == len(y_res)
def test_smotenc_categorical_features_str():
    """`categorical_features` accepts column names when X is a dataframe."""
    pd = pytest.importorskip("pandas")
    base = pd.DataFrame(
        {
            "A": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            "B": ["a", "b"] * 5,
            "C": ["a", "b", "c"] * 3 + ["a"],
        }
    )
    X = pd.concat([base] * 10, ignore_index=True)
    y = np.array([0] * 70 + [1] * 30)
    smote = SMOTENC(categorical_features=["B", "C"], random_state=0)
    X_res, y_res = smote.fit_resample(X, y)
    # Generated categories stay within the observed vocabulary.
    assert X_res["B"].isin(["a", "b"]).all()
    assert X_res["C"].isin(["a", "b", "c"]).all()
    counts = Counter(y_res)
    assert counts[0] == counts[1] == 70
    # Column names were resolved to their positional indices.
    assert_array_equal(smote.categorical_features_, [1, 2])
    assert_array_equal(smote.continuous_features_, [0])
def test_smotenc_categorical_features_auto():
    """`categorical_features="auto"` detects `category` dtypes in a dataframe."""
    pd = pytest.importorskip("pandas")
    base = pd.DataFrame(
        {
            "A": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            "B": ["a", "b"] * 5,
            "C": ["a", "b", "c"] * 3 + ["a"],
        }
    )
    X = pd.concat([base] * 10, ignore_index=True)
    # Only columns carrying a pandas `category` dtype should be detected.
    X["B"] = X["B"].astype("category")
    X["C"] = X["C"].astype("category")
    y = np.array([0] * 70 + [1] * 30)
    smote = SMOTENC(categorical_features="auto", random_state=0)
    X_res, y_res = smote.fit_resample(X, y)
    assert X_res["B"].isin(["a", "b"]).all()
    assert X_res["C"].isin(["a", "b", "c"]).all()
    counts = Counter(y_res)
    assert counts[0] == counts[1] == 70
    assert_array_equal(smote.categorical_features_, [1, 2])
    assert_array_equal(smote.continuous_features_, [0])
def test_smote_nc_categorical_features_auto_error():
    """Check that we raise a proper error when we cannot use the `'auto'` mode."""
    pd = pytest.importorskip("pandas")
    X = pd.DataFrame(
        {
            "A": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            "B": ["a", "b"] * 5,
            "C": ["a", "b", "c"] * 3 + ["a"],
        }
    )
    y = np.array([0] * 70 + [1] * 30)
    smote = SMOTENC(categorical_features="auto", random_state=0)
    # "auto" requires a dataframe so that dtypes can be inspected...
    with pytest.raises(ValueError, match="the input data should be a pandas.DataFrame"):
        smote.fit_resample(X.to_numpy(), y)
    # ...and at least one `category` dtype column must be present.
    err_msg = "SMOTE-NC is not designed to work only with numerical features"
    with pytest.raises(ValueError, match=err_msg):
        smote.fit_resample(X, y)
| 14,594 | 33.75 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/test_smoten.py | import numpy as np
import pytest
from sklearn.exceptions import DataConversionWarning
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.utils._testing import _convert_container
from imblearn.over_sampling import SMOTEN
@pytest.fixture
def data():
    """Three categorical features with an imbalanced binary string target."""
    rng = np.random.RandomState(0)
    col_a = ["A"] * 10 + ["B"] * 20 + ["C"] * 30
    col_b = ["A"] * 40 + ["B"] * 20
    col_c = ["A"] * 20 + ["B"] * 20 + ["C"] * 10 + ["D"] * 10
    X = np.array([col_a, col_b, col_c], dtype=object).T
    rng.shuffle(X)
    # 20 minority ("not apple") vs 40 majority ("apple") samples.
    y_labels = np.array(["not apple", "apple"], dtype=object)
    y = y_labels[np.array([0] * 20 + [1] * 40, dtype=np.int32)]
    return X, y
def test_smoten(data):
    """Overall smoke test for SMOTEN on categorical-only data."""
    X, y = data
    smoten = SMOTEN(random_state=0)
    X_res, y_res = smoten.fit_resample(X, y)
    # The minority class is brought up to the majority count: 40 + 40 = 80.
    assert X_res.shape == (80, 3)
    assert y_res.shape == (80,)
    assert isinstance(smoten.categorical_encoder_, OrdinalEncoder)
def test_smoten_resampling():
    """Check that SMOTEN resamples the data as expected.

    "not apple" is the minority class and the "blue" category is forced to be
    associated with that class, so every synthetic sample should be from the
    "blue" category and labelled "not apple".
    """
    X = np.array(["green"] * 5 + ["red"] * 10 + ["blue"] * 7, dtype=object)
    X = X.reshape(-1, 1)
    y = np.array(
        ["apple"] * 5
        + ["not apple"] * 3
        + ["apple"] * 7
        + ["not apple"] * 5
        + ["apple"] * 2,
        dtype=object,
    )
    X_res, y_res = SMOTEN(random_state=0).fit_resample(X, y)
    n_original = X.shape[0]
    X_new, y_new = X_res[n_original:], y_res[n_original:]
    np.testing.assert_array_equal(X_new, "blue")
    np.testing.assert_array_equal(y_new, "not apple")
@pytest.mark.parametrize("sparse_format", ["sparse_csr", "sparse_csc"])
def test_smoten_sparse_input(data, sparse_format):
    """Check that we handle sparse input in SMOTEN even if it is not efficient.

    Non-regression test for:
    https://github.com/scikit-learn-contrib/imbalanced-learn/issues/971
    """
    X, y = data
    X = _convert_container(OneHotEncoder().fit_transform(X), sparse_format)
    # SMOTEN warns that sparse input is densified internally.
    with pytest.warns(DataConversionWarning, match="is not really efficient"):
        X_res, y_res = SMOTEN(random_state=0).fit_resample(X, y)
    # The original sparse format is restored on output.
    assert X_res.format == X.format
    assert X_res.shape[0] == len(y_res)
def test_smoten_categorical_encoder(data):
    """Check that `categorical_encoder` is used when provided."""
    X, y = data
    sampler = SMOTEN(random_state=0)
    sampler.fit_resample(X, y)
    # Default internal encoder: an int32 OrdinalEncoder.
    assert isinstance(sampler.categorical_encoder_, OrdinalEncoder)
    assert sampler.categorical_encoder_.dtype == np.int32
    custom_encoder = OrdinalEncoder(dtype=np.int64)
    sampler.set_params(categorical_encoder=custom_encoder).fit_resample(X, y)
    assert isinstance(sampler.categorical_encoder_, OrdinalEncoder)
    # The provided encoder is stored as-is while a distinct instance is fitted.
    assert sampler.categorical_encoder is custom_encoder
    assert sampler.categorical_encoder_ is not custom_encoder
    assert sampler.categorical_encoder_.dtype == np.int64
| 3,231 | 32.666667 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/test_smote.py | """Test the module SMOTE."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.over_sampling import SMOTE
# Seed shared by every test so SMOTE's random interpolation is reproducible.
RND_SEED = 0
# Toy 2D dataset: 20 samples, 12 in class 1 (majority) and 8 in class 0
# (minority).
X = np.array(
    [
        [0.11622591, -0.0317206],
        [0.77481731, 0.60935141],
        [1.25192108, -0.22367336],
        [0.53366841, -0.30312976],
        [1.52091956, -0.49283504],
        [-0.28162401, -2.10400981],
        [0.83680821, 1.72827342],
        [0.3084254, 0.33299982],
        [0.70472253, -0.73309052],
        [0.28893132, -0.38761769],
        [1.15514042, 0.0129463],
        [0.88407872, 0.35454207],
        [1.31301027, -0.92648734],
        [-1.11515198, -0.93689695],
        [-0.18410027, -0.45194484],
        [0.9281014, 0.53085498],
        [-0.14374509, 0.27370049],
        [-0.41635887, -0.38299653],
        [0.08711622, 0.93259929],
        [1.70580611, -0.11219234],
    ]
)
Y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
# Relative tolerance used when comparing interpolated floating-point samples.
R_TOL = 1e-4
def test_sample_regular():
    """SMOTE with default settings balances the classes: 4 synthetic minority
    (class 0) samples are appended after the 20 original samples."""
    smote = SMOTE(random_state=RND_SEED)
    X_resampled, y_resampled = smote.fit_resample(X, Y)
    # Expected output: the original dataset followed by the 4 interpolated
    # minority samples (last 4 rows).
    X_gt = np.array(
        [
            [0.11622591, -0.0317206],
            [0.77481731, 0.60935141],
            [1.25192108, -0.22367336],
            [0.53366841, -0.30312976],
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.3084254, 0.33299982],
            [0.70472253, -0.73309052],
            [0.28893132, -0.38761769],
            [1.15514042, 0.0129463],
            [0.88407872, 0.35454207],
            [1.31301027, -0.92648734],
            [-1.11515198, -0.93689695],
            [-0.18410027, -0.45194484],
            [0.9281014, 0.53085498],
            [-0.14374509, 0.27370049],
            [-0.41635887, -0.38299653],
            [0.08711622, 0.93259929],
            [1.70580611, -0.11219234],
            [0.29307743, -0.14670439],
            [0.84976473, -0.15570176],
            [0.61319159, -0.11571668],
            [0.66052536, -0.28246517],
        ]
    )
    y_gt = np.array(
        [0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0]
    )
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
def test_sample_regular_half():
    """A dict `sampling_strategy` ({0: 9, 1: 12}) generates a single extra
    minority sample instead of fully balancing the classes."""
    sampling_strategy = {0: 9, 1: 12}
    smote = SMOTE(sampling_strategy=sampling_strategy, random_state=RND_SEED)
    X_resampled, y_resampled = smote.fit_resample(X, Y)
    # Expected output: the original dataset plus one interpolated class-0
    # sample (last row).
    X_gt = np.array(
        [
            [0.11622591, -0.0317206],
            [0.77481731, 0.60935141],
            [1.25192108, -0.22367336],
            [0.53366841, -0.30312976],
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.3084254, 0.33299982],
            [0.70472253, -0.73309052],
            [0.28893132, -0.38761769],
            [1.15514042, 0.0129463],
            [0.88407872, 0.35454207],
            [1.31301027, -0.92648734],
            [-1.11515198, -0.93689695],
            [-0.18410027, -0.45194484],
            [0.9281014, 0.53085498],
            [-0.14374509, 0.27370049],
            [-0.41635887, -0.38299653],
            [0.08711622, 0.93259929],
            [1.70580611, -0.11219234],
            [0.36784496, -0.1953161],
        ]
    )
    y_gt = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0])
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
def test_sample_regular_with_nn():
    """Passing a pre-built `NearestNeighbors(n_neighbors=6)` as `k_neighbors`
    yields exactly the same output as the default SMOTE (see
    `test_sample_regular`, whose expected arrays are identical)."""
    nn_k = NearestNeighbors(n_neighbors=6)
    smote = SMOTE(random_state=RND_SEED, k_neighbors=nn_k)
    X_resampled, y_resampled = smote.fit_resample(X, Y)
    X_gt = np.array(
        [
            [0.11622591, -0.0317206],
            [0.77481731, 0.60935141],
            [1.25192108, -0.22367336],
            [0.53366841, -0.30312976],
            [1.52091956, -0.49283504],
            [-0.28162401, -2.10400981],
            [0.83680821, 1.72827342],
            [0.3084254, 0.33299982],
            [0.70472253, -0.73309052],
            [0.28893132, -0.38761769],
            [1.15514042, 0.0129463],
            [0.88407872, 0.35454207],
            [1.31301027, -0.92648734],
            [-1.11515198, -0.93689695],
            [-0.18410027, -0.45194484],
            [0.9281014, 0.53085498],
            [-0.14374509, 0.27370049],
            [-0.41635887, -0.38299653],
            [0.08711622, 0.93259929],
            [1.70580611, -0.11219234],
            [0.29307743, -0.14670439],
            [0.84976473, -0.15570176],
            [0.61319159, -0.11571668],
            [0.66052536, -0.28246517],
        ]
    )
    y_gt = np.array(
        [0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0]
    )
    assert_allclose(X_resampled, X_gt, rtol=R_TOL)
    assert_array_equal(y_resampled, y_gt)
| 5,046 | 32.646667 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/test_borderline_smote.py | from collections import Counter
import pytest
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.over_sampling import BorderlineSMOTE
@pytest.mark.parametrize("kind", ["borderline-1", "borderline-2"])
def test_borderline_smote_no_in_danger_samples(kind):
"""Check that the algorithm behave properly even on a dataset without any sample
in danger.
"""
X, y = make_classification(
n_samples=500,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_clusters_per_class=1,
n_classes=3,
weights=[0.1, 0.2, 0.7],
class_sep=1.5,
random_state=1,
)
smote = BorderlineSMOTE(kind=kind, m_neighbors=3, k_neighbors=5, random_state=0)
X_res, y_res = smote.fit_resample(X, y)
assert_allclose(X, X_res)
assert_allclose(y, y_res)
assert not smote.in_danger_indices
def test_borderline_smote_kind():
"""Check the behaviour of the `kind` parameter.
In short, "borderline-2" generates sample closer to the boundary decision than
"borderline-1". We generate an example where a logistic regression will perform
worse on "borderline-2" than on "borderline-1".
"""
X, y = make_classification(
n_samples=500,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_clusters_per_class=1,
n_classes=3,
weights=[0.1, 0.2, 0.7],
class_sep=1.0,
random_state=1,
)
smote = BorderlineSMOTE(
kind="borderline-1", m_neighbors=9, k_neighbors=5, random_state=0
)
X_res_borderline_1, y_res_borderline_1 = smote.fit_resample(X, y)
smote.set_params(kind="borderline-2")
X_res_borderline_2, y_res_borderline_2 = smote.fit_resample(X, y)
score_borderline_1 = (
LogisticRegression()
.fit(X_res_borderline_1, y_res_borderline_1)
.score(X_res_borderline_1, y_res_borderline_1)
)
score_borderline_2 = (
LogisticRegression()
.fit(X_res_borderline_2, y_res_borderline_2)
.score(X_res_borderline_2, y_res_borderline_2)
)
assert score_borderline_1 > score_borderline_2
def test_borderline_smote_in_danger():
X, y = make_classification(
n_samples=500,
n_features=2,
n_informative=2,
n_redundant=0,
n_repeated=0,
n_clusters_per_class=1,
n_classes=3,
weights=[0.1, 0.2, 0.7],
class_sep=0.8,
random_state=1,
)
smote = BorderlineSMOTE(
kind="borderline-1",
m_neighbors=9,
k_neighbors=5,
random_state=0,
)
_, y_res_1 = smote.fit_resample(X, y)
in_danger_indices_borderline_1 = smote.in_danger_indices
smote.set_params(kind="borderline-2")
_, y_res_2 = smote.fit_resample(X, y)
in_danger_indices_borderline_2 = smote.in_danger_indices
for key1, key2 in zip(
in_danger_indices_borderline_1, in_danger_indices_borderline_2
):
assert_array_equal(
in_danger_indices_borderline_1[key1], in_danger_indices_borderline_2[key2]
)
assert len(in_danger_indices_borderline_1) == len(in_danger_indices_borderline_2)
counter = Counter(y_res_1)
assert counter[0] == counter[1] == counter[2]
counter = Counter(y_res_2)
assert counter[0] == counter[1] == counter[2]
| 3,490 | 30.45045 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/test_kmeans_smote.py | import numpy as np
import pytest
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.datasets import make_classification
from sklearn.neighbors import NearestNeighbors
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.over_sampling import SMOTE, KMeansSMOTE
@pytest.fixture
def data():
X = np.array(
[
[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
]
)
y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
return X, y
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_kmeans_smote(data):
X, y = data
kmeans_smote = KMeansSMOTE(
kmeans_estimator=1,
random_state=42,
cluster_balance_threshold=0.0,
k_neighbors=5,
)
smote = SMOTE(random_state=42)
X_res_1, y_res_1 = kmeans_smote.fit_resample(X, y)
X_res_2, y_res_2 = smote.fit_resample(X, y)
assert_allclose(X_res_1, X_res_2)
assert_array_equal(y_res_1, y_res_2)
assert kmeans_smote.nn_k_.n_neighbors == 6
assert kmeans_smote.kmeans_estimator_.n_clusters == 1
assert "batch_size" in kmeans_smote.kmeans_estimator_.get_params()
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
@pytest.mark.parametrize("k_neighbors", [2, NearestNeighbors(n_neighbors=3)])
@pytest.mark.parametrize(
"kmeans_estimator",
[
3,
KMeans(n_clusters=3, n_init=1, random_state=42),
MiniBatchKMeans(n_clusters=3, n_init=1, random_state=42),
],
)
def test_sample_kmeans_custom(data, k_neighbors, kmeans_estimator):
X, y = data
kmeans_smote = KMeansSMOTE(
random_state=42,
kmeans_estimator=kmeans_estimator,
k_neighbors=k_neighbors,
)
X_resampled, y_resampled = kmeans_smote.fit_resample(X, y)
assert X_resampled.shape == (24, 2)
assert y_resampled.shape == (24,)
assert kmeans_smote.nn_k_.n_neighbors == 3
assert kmeans_smote.kmeans_estimator_.n_clusters == 3
@pytest.mark.filterwarnings("ignore:The default value of `n_init` will change")
def test_sample_kmeans_not_enough_clusters(data):
X, y = data
smote = KMeansSMOTE(cluster_balance_threshold=10, random_state=42)
with pytest.raises(RuntimeError):
smote.fit_resample(X, y)
@pytest.mark.parametrize("density_exponent", ["auto", 10])
@pytest.mark.parametrize("cluster_balance_threshold", ["auto", 0.1])
def test_sample_kmeans_density_estimation(density_exponent, cluster_balance_threshold):
X, y = make_classification(
n_samples=10_000, n_classes=2, weights=[0.3, 0.7], random_state=42
)
smote = KMeansSMOTE(
kmeans_estimator=MiniBatchKMeans(n_init=1, random_state=42),
random_state=0,
density_exponent=density_exponent,
cluster_balance_threshold=cluster_balance_threshold,
)
smote.fit_resample(X, y)
| 3,632 | 32.330275 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/over_sampling/_smote/tests/test_svm_smote.py | import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from sklearn.svm import SVC
from sklearn.utils._testing import assert_allclose, assert_array_equal
from imblearn.over_sampling import SVMSMOTE
@pytest.fixture
def data():
X = np.array(
[
[0.11622591, -0.0317206],
[0.77481731, 0.60935141],
[1.25192108, -0.22367336],
[0.53366841, -0.30312976],
[1.52091956, -0.49283504],
[-0.28162401, -2.10400981],
[0.83680821, 1.72827342],
[0.3084254, 0.33299982],
[0.70472253, -0.73309052],
[0.28893132, -0.38761769],
[1.15514042, 0.0129463],
[0.88407872, 0.35454207],
[1.31301027, -0.92648734],
[-1.11515198, -0.93689695],
[-0.18410027, -0.45194484],
[0.9281014, 0.53085498],
[-0.14374509, 0.27370049],
[-0.41635887, -0.38299653],
[0.08711622, 0.93259929],
[1.70580611, -0.11219234],
]
)
y = np.array([0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0])
return X, y
def test_svm_smote(data):
svm_smote = SVMSMOTE(random_state=42)
svm_smote_nn = SVMSMOTE(
random_state=42,
k_neighbors=NearestNeighbors(n_neighbors=6),
m_neighbors=NearestNeighbors(n_neighbors=11),
svm_estimator=SVC(gamma="scale", random_state=42),
)
X_res_1, y_res_1 = svm_smote.fit_resample(*data)
X_res_2, y_res_2 = svm_smote_nn.fit_resample(*data)
assert_allclose(X_res_1, X_res_2)
assert_array_equal(y_res_1, y_res_2)
def test_svm_smote_not_svm(data):
"""Check that we raise a proper error if passing an estimator that does not
expose a `support_` fitted attribute."""
err_msg = "`svm_estimator` is required to exposed a `support_` fitted attribute."
with pytest.raises(RuntimeError, match=err_msg):
SVMSMOTE(svm_estimator=LogisticRegression()).fit_resample(*data)
def test_svm_smote_all_noise(data):
"""Check that we raise a proper error message when all support vectors are
detected as noise and there is nothing that we can do.
Non-regression test for:
https://github.com/scikit-learn-contrib/imbalanced-learn/issues/742
"""
X, y = make_classification(
n_classes=3,
class_sep=0.001,
weights=[0.004, 0.451, 0.545],
n_informative=3,
n_redundant=0,
flip_y=0,
n_features=3,
n_clusters_per_class=2,
n_samples=1000,
random_state=10,
)
with pytest.raises(ValueError, match="SVM-SMOTE is not adapted to your dataset"):
SVMSMOTE(k_neighbors=4, random_state=42).fit_resample(X, y)
| 2,860 | 31.146067 | 85 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/keras/_generator.py | """Implement generators for ``keras`` which will balance the data."""
# This is a trick to avoid an error during tests collection with pytest. We
# avoid the error when importing the package raise the error at the moment of
# creating the instance.
# This is a trick to avoid an error during tests collection with pytest. We
# avoid the error when importing the package raise the error at the moment of
# creating the instance.
def import_keras():
    """Try to import keras from keras and tensorflow.

    This is possible to import the sequence from keras or tensorflow.

    Returns
    -------
    ParentClass : tuple of type
        A 1-tuple containing the ``Sequence`` class to inherit from, or
        ``(object,)`` when neither keras nor tensorflow is installed.
    has_keras : bool
        Whether a keras implementation was found.
    """

    def _sequence_from(keras_module):
        # ``Sequence`` moved between ``keras.utils`` and
        # ``keras.utils.data_utils`` across keras versions.
        if hasattr(keras_module.utils, "Sequence"):
            return (keras_module.utils.Sequence,), True
        return (keras_module.utils.data_utils.Sequence,), True

    def import_from_keras():
        try:
            import keras  # noqa
        except ImportError:
            return tuple(), False
        return _sequence_from(keras)

    # NOTE: fixed the misspelled helper name (was ``import_from_tensforflow``).
    def import_from_tensorflow():
        try:
            from tensorflow import keras
        except ImportError:
            return tuple(), False
        return _sequence_from(keras)

    ParentClassKeras, has_keras_k = import_from_keras()
    ParentClassTensorflow, has_keras_tf = import_from_tensorflow()
    has_keras = has_keras_k or has_keras_tf
    # Prefer the standalone keras package over the tensorflow-bundled one.
    if has_keras_k:
        ParentClass = ParentClassKeras
    elif has_keras_tf:
        ParentClass = ParentClassTensorflow
    else:
        ParentClass = (object,)
    return ParentClass, has_keras
# Resolve the Sequence base class and keras availability once at import time.
ParentClass, HAS_KERAS = import_keras()
from scipy.sparse import issparse # noqa
from sklearn.base import clone # noqa
from sklearn.utils import _safe_indexing # noqa
from sklearn.utils import check_random_state # noqa
from ..tensorflow import balanced_batch_generator as tf_bbg # noqa
from ..under_sampling import RandomUnderSampler # noqa
from ..utils import Substitution # noqa
from ..utils._docstring import _random_state_docstring # noqa
class BalancedBatchGenerator(*ParentClass):  # type: ignore
    """Create balanced batches when training a keras model.

    Create a keras ``Sequence`` which is given to ``fit``. The
    sampler defines the sampling strategy used to balance the dataset ahead of
    creating the batch. The sampler should have an attribute
    ``sample_indices_``.

    .. versionadded:: 0.4

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Original imbalanced dataset.

    y : ndarray of shape (n_samples,) or (n_samples, n_classes)
        Associated targets.

    sample_weight : ndarray of shape (n_samples,)
        Sample weight.

    sampler : sampler object, default=None
        A sampler instance which has an attribute ``sample_indices_``.
        By default, the sampler used is a
        :class:`~imblearn.under_sampling.RandomUnderSampler`.

    batch_size : int, default=32
        Number of samples per gradient update.

    keep_sparse : bool, default=False
        Either or not to conserve or not the sparsity of the input (i.e. ``X``,
        ``y``, ``sample_weight``). By default, the returned batches will be
        dense.

    random_state : int, RandomState instance or None, default=None
        Control the randomization of the algorithm:

        - If int, ``random_state`` is the seed used by the random number
          generator;
        - If ``RandomState`` instance, random_state is the random number
          generator;
        - If ``None``, the random number generator is the ``RandomState``
          instance used by ``np.random``.

    Attributes
    ----------
    sampler_ : sampler object
        The sampler used to balance the dataset.

    indices_ : ndarray of shape (n_samples, n_features)
        The indices of the samples selected during sampling.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> iris = load_iris()
    >>> from imblearn.datasets import make_imbalance
    >>> class_dict = dict()
    >>> class_dict[0] = 30; class_dict[1] = 50; class_dict[2] = 40
    >>> X, y = make_imbalance(iris.data, iris.target, sampling_strategy=class_dict)
    >>> import tensorflow
    >>> y = tensorflow.keras.utils.to_categorical(y, 3)
    >>> model = tensorflow.keras.models.Sequential()
    >>> model.add(
    ...     tensorflow.keras.layers.Dense(
    ...         y.shape[1], input_dim=X.shape[1], activation='softmax'
    ...     )
    ... )
    >>> model.compile(optimizer='sgd', loss='categorical_crossentropy',
    ...               metrics=['accuracy'])
    >>> from imblearn.keras import BalancedBatchGenerator
    >>> from imblearn.under_sampling import NearMiss
    >>> training_generator = BalancedBatchGenerator(
    ...     X, y, sampler=NearMiss(), batch_size=10, random_state=42)
    >>> callback_history = model.fit(training_generator, epochs=10, verbose=0)
    """

    # flag for keras sequence duck-typing
    use_sequence_api = True

    def __init__(
        self,
        X,
        y,
        *,
        sample_weight=None,
        sampler=None,
        batch_size=32,
        keep_sparse=False,
        random_state=None,
    ):
        if not HAS_KERAS:
            # Fixed the malformed message (was "'No module named 'keras'").
            raise ImportError("No module named 'keras'")
        self.X = X
        self.y = y
        self.sample_weight = sample_weight
        self.sampler = sampler
        self.batch_size = batch_size
        self.keep_sparse = keep_sparse
        self.random_state = random_state
        self._sample()

    def _sample(self):
        """Fit (a clone of) the sampler and store the selected indices."""
        random_state = check_random_state(self.random_state)
        if self.sampler is None:
            self.sampler_ = RandomUnderSampler(random_state=random_state)
        else:
            self.sampler_ = clone(self.sampler)
        self.sampler_.fit_resample(self.X, self.y)
        if not hasattr(self.sampler_, "sample_indices_"):
            raise ValueError("'sampler' needs to have an attribute 'sample_indices_'.")
        self.indices_ = self.sampler_.sample_indices_
        # shuffle the indices since the sampler are packing them by class
        random_state.shuffle(self.indices_)

    def __len__(self):
        # Number of complete batches per epoch.
        return int(self.indices_.size // self.batch_size)

    def __getitem__(self, index):
        # Compute the indices of batch ``index`` once instead of re-slicing
        # for X, y, and sample_weight separately.
        batch_indices = self.indices_[
            index * self.batch_size : (index + 1) * self.batch_size
        ]
        X_resampled = _safe_indexing(self.X, batch_indices)
        y_resampled = _safe_indexing(self.y, batch_indices)
        if issparse(X_resampled) and not self.keep_sparse:
            X_resampled = X_resampled.toarray()
        if self.sample_weight is None:
            return X_resampled, y_resampled
        sample_weight_resampled = _safe_indexing(self.sample_weight, batch_indices)
        return X_resampled, y_resampled, sample_weight_resampled
@Substitution(random_state=_random_state_docstring)
def balanced_batch_generator(
    X,
    y,
    *,
    sample_weight=None,
    sampler=None,
    batch_size=32,
    keep_sparse=False,
    random_state=None,
):
    """Create a balanced batch generator to train keras model.

    Returns a generator --- as well as the number of step per epoch --- which
    is given to ``fit``. The sampler defines the sampling strategy
    used to balance the dataset ahead of creating the batch. The sampler should
    have an attribute ``sample_indices_``.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Original imbalanced dataset.

    y : ndarray of shape (n_samples,) or (n_samples, n_classes)
        Associated targets.

    sample_weight : ndarray of shape (n_samples,), default=None
        Sample weight.

    sampler : sampler object, default=None
        A sampler instance which has an attribute ``sample_indices_``.
        By default, the sampler used is a
        :class:`~imblearn.under_sampling.RandomUnderSampler`.

    batch_size : int, default=32
        Number of samples per gradient update.

    keep_sparse : bool, default=False
        Either or not to conserve or not the sparsity of the input (i.e. ``X``,
        ``y``, ``sample_weight``). By default, the returned batches will be
        dense.

    {random_state}

    Returns
    -------
    generator : generator of tuple
        Generate batch of data. The tuple generated are either (X_batch,
        y_batch) or (X_batch, y_batch, sampler_weight_batch).

    steps_per_epoch : int
        The number of samples per epoch. Required by ``fit_generator`` in
        keras.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> X, y = load_iris(return_X_y=True)
    >>> from imblearn.datasets import make_imbalance
    >>> class_dict = dict()
    >>> class_dict[0] = 30; class_dict[1] = 50; class_dict[2] = 40
    >>> from imblearn.datasets import make_imbalance
    >>> X, y = make_imbalance(X, y, sampling_strategy=class_dict)
    >>> import tensorflow
    >>> y = tensorflow.keras.utils.to_categorical(y, 3)
    >>> model = tensorflow.keras.models.Sequential()
    >>> model.add(
    ...     tensorflow.keras.layers.Dense(
    ...         y.shape[1], input_dim=X.shape[1], activation='softmax'
    ...     )
    ... )
    >>> model.compile(optimizer='sgd', loss='categorical_crossentropy',
    ...               metrics=['accuracy'])
    >>> from imblearn.keras import balanced_batch_generator
    >>> from imblearn.under_sampling import NearMiss
    >>> training_generator, steps_per_epoch = balanced_batch_generator(
    ...     X, y, sampler=NearMiss(), batch_size=10, random_state=42)
    >>> callback_history = model.fit(training_generator,
    ...                              steps_per_epoch=steps_per_epoch,
    ...                              epochs=10, verbose=0)
    """
    # This keras entry point is a thin alias: all of the work is delegated to
    # the shared tensorflow implementation.
    generator_params = dict(
        X=X,
        y=y,
        sample_weight=sample_weight,
        sampler=sampler,
        batch_size=batch_size,
        keep_sparse=keep_sparse,
        random_state=random_state,
    )
    return tf_bbg(**generator_params)
imbalanced-learn | imbalanced-learn-master/imblearn/keras/__init__.py | """The :mod:`imblearn.keras` provides utilities to deal with imbalanced dataset
in keras."""
from ._generator import BalancedBatchGenerator, balanced_batch_generator
__all__ = ["BalancedBatchGenerator", "balanced_batch_generator"]
| 233 | 32.428571 | 79 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/keras/tests/test_generator.py | import numpy as np
import pytest
from scipy import sparse
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris
from sklearn.preprocessing import LabelBinarizer
keras = pytest.importorskip("keras")
from keras.layers import Dense # noqa: E402
from keras.models import Sequential # noqa: E402
from imblearn.datasets import make_imbalance # noqa: E402
from imblearn.keras import (
BalancedBatchGenerator, # noqa: E402
balanced_batch_generator, # noqa: E402
)
from imblearn.over_sampling import RandomOverSampler # noqa: E402
from imblearn.under_sampling import (
ClusterCentroids, # noqa: E402
NearMiss, # noqa: E402
)
# Removed a stray no-op literal ("3") that had no effect at module import.
@pytest.fixture
def data():
    """Imbalanced iris (30/50/40 per class) with one-hot encoded targets."""
    iris = load_iris()
    X, y = make_imbalance(
        iris.data, iris.target, sampling_strategy={0: 30, 1: 50, 2: 40}
    )
    return X, LabelBinarizer().fit_transform(y)
def _build_keras_model(n_classes, n_features):
    """Build and compile a single dense softmax-layer keras classifier."""
    model = Sequential()
    model.add(Dense(n_classes, input_dim=n_features, activation="softmax"))
    model.compile(
        optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"]
    )
    return model
def test_balanced_batch_generator_class_no_return_indices(data):
    """A sampler without `sample_indices_` is rejected by the class API."""
    sampler = ClusterCentroids(estimator=KMeans(n_init=1))
    with pytest.raises(ValueError, match="needs to have an attribute"):
        BalancedBatchGenerator(*data, sampler=sampler, batch_size=10)
@pytest.mark.filterwarnings("ignore:`wait_time` is not used")  # keras 2.2.4
@pytest.mark.parametrize(
    "sampler, sample_weight",
    [
        (None, None),
        (RandomOverSampler(), None),
        (NearMiss(), None),
        (None, np.random.uniform(size=120)),
    ],
)
def test_balanced_batch_generator_class(data, sampler, sample_weight):
    """Train a keras model from a `BalancedBatchGenerator` sequence."""
    X, y = data
    model = _build_keras_model(y.shape[1], X.shape[1])
    generator = BalancedBatchGenerator(
        X,
        y,
        sample_weight=sample_weight,
        sampler=sampler,
        batch_size=10,
        random_state=42,
    )
    model.fit_generator(generator=generator, epochs=10)
@pytest.mark.parametrize("keep_sparse", [True, False])
def test_balanced_batch_generator_class_sparse(data, keep_sparse):
    """Batches stay sparse iff `keep_sparse=True` was requested."""
    X, y = data
    training_generator = BalancedBatchGenerator(
        sparse.csr_matrix(X),
        y,
        batch_size=10,
        keep_sparse=keep_sparse,
        random_state=42,
    )
    for idx in range(len(training_generator)):
        X_batch, _ = training_generator.__getitem__(idx)
        if keep_sparse:
            assert sparse.issparse(X_batch)
        else:
            assert not sparse.issparse(X_batch)
def test_balanced_batch_generator_function_no_return_indices(data):
    """The functional API must also reject samplers without `sample_indices_`."""
    with pytest.raises(ValueError, match="needs to have an attribute"):
        balanced_batch_generator(
            *data,
            sampler=ClusterCentroids(estimator=KMeans(n_init=10)),
            batch_size=10,
            random_state=42,
        )
@pytest.mark.filterwarnings("ignore:`wait_time` is not used")  # keras 2.2.4
@pytest.mark.parametrize(
    "sampler, sample_weight",
    [
        (None, None),
        (RandomOverSampler(), None),
        (NearMiss(), None),
        (None, np.random.uniform(size=120)),
    ],
)
def test_balanced_batch_generator_function(data, sampler, sample_weight):
    """Smoke test for the functional generator API with several samplers."""
    X, y = data
    model = _build_keras_model(y.shape[1], X.shape[1])
    training_generator, steps_per_epoch = balanced_batch_generator(
        X,
        y,
        sample_weight=sample_weight,
        sampler=sampler,
        batch_size=10,
        random_state=42,
    )
    model.fit_generator(
        generator=training_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=10,
    )
@pytest.mark.parametrize("keep_sparse", [True, False])
def test_balanced_batch_generator_function_sparse(data, keep_sparse):
    """Functional API: batches stay sparse iff `keep_sparse=True`."""
    X, y = data
    training_generator, steps_per_epoch = balanced_batch_generator(
        sparse.csr_matrix(X),
        y,
        keep_sparse=keep_sparse,
        batch_size=10,
        random_state=42,
    )
    for _ in range(steps_per_epoch):
        X_batch, _ = next(training_generator)
        if keep_sparse:
            assert sparse.issparse(X_batch)
        else:
            assert not sparse.issparse(X_batch)
| 4,289 | 27.986486 | 86 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/keras/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/_show_versions.py | """
Utility method which prints system info to help with debugging,
and filing issues on GitHub.
Adapted from :func:`sklearn.show_versions`,
which was adapted from :func:`pandas.show_versions`
"""
# Author: Alexander L. Hayes <hayesall@iu.edu>
# License: MIT
from .. import __version__
def _get_deps_info():
    """Overview of the installed version of main dependencies.

    Returns
    -------
    deps_info: dict
        Version information on relevant Python libraries; maps the
        distribution name to its version string, or ``None`` when the
        distribution is not installed.
    """
    deps = [
        "imbalanced-learn",
        "pip",
        "setuptools",
        "numpy",
        "scipy",
        "scikit-learn",
        "Cython",
        "pandas",
        "keras",
        "tensorflow",
        "joblib",
    ]

    # Seed with the imported package's own version so that a source checkout
    # (not installed through pip) still reports a version.
    deps_info = {
        "imbalanced-learn": __version__,
    }

    from importlib.metadata import PackageNotFoundError, version

    for modname in deps:
        try:
            deps_info[modname] = version(modname)
        except PackageNotFoundError:
            # Keep a pre-seeded value (e.g. "imbalanced-learn" above) instead
            # of clobbering it with ``None``.
            deps_info.setdefault(modname, None)
    return deps_info
def show_versions(github=False):
    """Print debugging information.

    .. versionadded:: 0.5

    Parameters
    ----------
    github : bool,
        If true, wrap system info with GitHub markup.
    """
    # Deferred import of a private sklearn helper; NOTE(review): presumably
    # kept local so that importing this module stays cheap — confirm.
    from sklearn.utils._show_versions import _get_sys_info

    _sys_info = _get_sys_info()
    _deps_info = _get_deps_info()
    _github_markup = (
        "<details>"
        "<summary>System, Dependency Information</summary>\n\n"
        "**System Information**\n\n"
        "{0}\n"
        "**Python Dependencies**\n\n"
        "{1}\n"
        "</details>"
    )

    if github:
        # Render each section as a markdown bullet list inside <details>.
        _sys_markup = ""
        _deps_markup = ""
        for k, stat in _sys_info.items():
            _sys_markup += f"* {k:<10}: `{stat}`\n"
        for k, stat in _deps_info.items():
            _deps_markup += f"* {k:<10}: `{stat}`\n"
        print(_github_markup.format(_sys_markup, _deps_markup))
    else:
        # Plain-text rendering with right-aligned keys.
        print("\nSystem:")
        for k, stat in _sys_info.items():
            print(f"{k:>11}: {stat}")
        print("\nPython dependencies:")
        for k, stat in _deps_info.items():
            print(f"{k:>11}: {stat}")
| 2,176 | 22.408602 | 64 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/deprecation.py | """Utilities for deprecation"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import warnings
def deprecate_parameter(sampler, version_deprecation, param_deprecated, new_param=None):
    """Helper to deprecate a parameter by another one.

    A ``FutureWarning`` is emitted only when the deprecated parameter was
    explicitly set (i.e. is not ``None``) on ``sampler``.

    Parameters
    ----------
    sampler : sampler object,
        The object which will be inspected.

    version_deprecation : str,
        The version from which the parameter will be deprecated. The format
        should be ``'x.y'``.

    param_deprecated : str,
        The parameter being deprecated.

    new_param : str,
        The parameter used instead of the deprecated parameter. By default, no
        parameter is expected.
    """
    x, y = version_deprecation.split(".")
    # Deprecation cycle spans two minor versions.
    version_removed = x + "." + str(int(y) + 2)
    if getattr(sampler, param_deprecated) is None:
        # Deprecated parameter not set by the user: nothing to warn about.
        return
    # Single message construction fixes the double space that the previous
    # no-`new_param` branch produced ("and  will be removed").
    message = (
        f"'{param_deprecated}' is deprecated from {version_deprecation} and "
        f"will be removed in {version_removed} for the estimator "
        f"{sampler.__class__}."
    )
    if new_param is not None:
        message += f" Use '{new_param}' instead."
        # Forward the deprecated value to the replacement parameter.
        setattr(sampler, new_param, getattr(sampler, param_deprecated))
    warnings.warn(message, category=FutureWarning)
| 1,631 | 33.723404 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/_docstring.py | """Utilities for docstring in imbalanced-learn."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
class Substitution:
    """Decorator performing ``str.format`` substitution on docstrings.

    Works on functions and classes alike, and stays robust when
    ``obj.__doc__`` is ``None`` (for example when the interpreter runs
    with ``-OO``).
    """

    def __init__(self, *args, **kwargs):
        # Mixing positional and keyword substitution parameters is ambiguous.
        if args and kwargs:
            raise AssertionError("Only positional or keyword args are allowed")
        self.params = args if args else kwargs

    def __call__(self, obj):
        doc = obj.__doc__
        if doc:
            obj.__doc__ = doc.format(**self.params)
        return obj
_random_state_docstring = """random_state : int, RandomState instance, default=None
Control the randomization of the algorithm.
- If int, ``random_state`` is the seed used by the random number
generator;
- If ``RandomState`` instance, random_state is the random number
generator;
- If ``None``, the random number generator is the ``RandomState``
instance used by ``np.random``.
""".rstrip()
_n_jobs_docstring = """n_jobs : int, default=None
Number of CPU cores used during the cross-validation loop.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See
`Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`_
for more details.
""".rstrip()
| 1,512 | 32.622222 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/testing.py | """Test utilities."""
# Adapted from scikit-learn
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import inspect
import pkgutil
from importlib import import_module
from operator import itemgetter
from pathlib import Path
import numpy as np
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.neighbors import KDTree
from sklearn.utils._testing import ignore_warnings
def all_estimators(
    type_filter=None,
):
    """Get a list of all estimators from imblearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators are also not included.
    This function is adapted from sklearn.

    Parameters
    ----------
    type_filter : str, list of str, or None, default=None
        Which kind of estimators should be returned. If None, no
        filter is applied and all estimators are returned. Possible
        values are 'sampler' to get estimators only of these specific
        types, or a list of these to get the estimators that fit at
        least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    # Imported lazily to avoid a circular import at module load time.
    from ..base import SamplerMixin

    def is_abstract(c):
        # A class is abstract when it still carries unimplemented
        # abstract methods.
        if not (hasattr(c, "__abstractmethods__")):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    modules_to_ignore = {"tests"}
    root = str(Path(__file__).parent.parent)
    # Ignore deprecation warnings triggered at import time and from walking
    # packages
    with ignore_warnings(category=FutureWarning):
        for importer, modname, ispkg in pkgutil.walk_packages(
            path=[root], prefix="imblearn."
        ):
            mod_parts = modname.split(".")
            # Skip test modules and private modules ("._" in dotted path).
            if any(part in modules_to_ignore for part in mod_parts) or "._" in modname:
                continue
            module = import_module(modname)
            classes = inspect.getmembers(module, inspect.isclass)
            classes = [
                (name, est_cls) for name, est_cls in classes if not name.startswith("_")
            ]
            all_classes.extend(classes)
    all_classes = set(all_classes)
    estimators = [
        c
        for c in all_classes
        if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
    ]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]
    # get rid of sklearn estimators which have been imported in some classes
    estimators = [c for c in estimators if "sklearn" not in c[1].__module__]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {"sampler": SamplerMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend(
                    [est for est in estimators if issubclass(est[1], mixin)]
                )
        estimators = filtered_estimators
        # Anything left over in type_filter was not a recognized filter name.
        if type_filter:
            raise ValueError(
                "Parameter type_filter must be 'sampler' or "
                "None, got"
                " %s." % repr(type_filter)
            )
    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
class _CustomNearestNeighbors(BaseEstimator):
    """Basic implementation of nearest neighbors not relying on scikit-learn.

    `kneighbors_graph` is ignored and `metric` does not have any impact.
    """

    def __init__(self, n_neighbors=1, metric="euclidean"):
        self.n_neighbors = n_neighbors
        self.metric = metric

    def fit(self, X, y=None):
        # Densify when needed: KDTree does not accept sparse input.
        X = X.toarray() if sparse.issparse(X) else X
        self._kd_tree = KDTree(X)
        return self

    def kneighbors(self, X, n_neighbors=None, return_distance=True):
        n_neighbors = n_neighbors if n_neighbors is not None else self.n_neighbors
        X = X.toarray() if sparse.issparse(X) else X
        distances, indices = self._kd_tree.query(X, k=n_neighbors)
        if return_distance:
            return distances, indices
        return indices

    def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
        """This method is not used within imblearn but it is required for
        duck-typing."""
        # Fix: `self` was missing from the signature, so calling the method on
        # an instance bound the instance itself to `X`.
        pass
class _CustomClusterer(BaseEstimator):
    """Class that mimics a cluster that does not expose `cluster_centers_`."""

    def __init__(self, n_clusters=1, expose_cluster_centers=True):
        self.n_clusters = n_clusters
        self.expose_cluster_centers = expose_cluster_centers

    def fit(self, X, y=None):
        if self.expose_cluster_centers:
            # Random centers; NOTE(review): presumably only the presence of
            # the attribute matters to callers — values are not meaningful.
            self.cluster_centers_ = np.random.randn(self.n_clusters, X.shape[1])
        return self

    def predict(self, X):
        # Trivially assign every sample to cluster 0.
        return np.zeros(len(X), dtype=int)
| 5,322 | 32.689873 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/estimator_checks.py | """Utils to check the samplers and compatibility with scikit-learn"""
# Adapated from scikit-learn
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import re
import sys
import traceback
import warnings
from collections import Counter
from functools import partial
import numpy as np
import pytest
import sklearn
from scipy import sparse
from sklearn.base import clone, is_classifier, is_regressor
from sklearn.cluster import KMeans
from sklearn.datasets import ( # noqa
load_iris,
make_blobs,
make_classification,
make_multilabel_classification,
)
from sklearn.exceptions import SkipTestWarning
from sklearn.preprocessing import StandardScaler, label_binarize
from sklearn.utils._tags import _safe_tags
from sklearn.utils._testing import (
SkipTest,
assert_allclose,
assert_array_equal,
assert_raises_regex,
raises,
set_random_state,
)
from sklearn.utils.estimator_checks import (
_enforce_estimator_tags_y,
_get_check_estimator_ids,
_maybe_mark_xfail,
)
try:
from sklearn.utils.estimator_checks import _enforce_estimator_tags_x
except ImportError:
# scikit-learn >= 1.2
from sklearn.utils.estimator_checks import (
_enforce_estimator_tags_X as _enforce_estimator_tags_x,
)
from sklearn.utils.fixes import parse_version
from sklearn.utils.multiclass import type_of_target
from imblearn.datasets import make_imbalance
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.under_sampling.base import BaseCleaningSampler, BaseUnderSampler
from imblearn.utils._param_validation import generate_invalid_param_val, make_constraint
sklearn_version = parse_version(sklearn.__version__)
def sample_dataset_generator():
    """Return a fixed imbalanced 3-class dataset (1000 samples, seed 0)."""
    X, y = make_classification(
        n_samples=1000,
        n_classes=3,
        n_informative=4,
        weights=[0.2, 0.3, 0.5],
        random_state=0,
    )
    return X, y
@pytest.fixture(name="sample_dataset_generator")
def sample_dataset_generator_fixture():
    """Pytest fixture wrapping :func:`sample_dataset_generator`."""
    return sample_dataset_generator()
def _set_checking_parameters(estimator):
    """Tune estimator parameters in place to keep the common checks fast."""
    params = estimator.get_params()
    name = estimator.__class__.__name__
    if "n_estimators" in params:
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if name == "ClusterCentroids":
        # The KMeans "full" algorithm was renamed to "lloyd" in
        # scikit-learn 1.1.
        if sklearn_version < parse_version("1.1"):
            algorithm = "full"
        else:
            algorithm = "lloyd"
        estimator.set_params(
            voting="soft",
            estimator=KMeans(random_state=0, algorithm=algorithm, n_init=1),
        )
    if name == "KMeansSMOTE":
        estimator.set_params(kmeans_estimator=12)
    if name == "BalancedRandomForestClassifier":
        # TODO: remove in 0.13
        # future default in 0.13
        estimator.set_params(replacement=True, sampling_strategy="all", bootstrap=False)
def _yield_sampler_checks(sampler):
    """Yield the sampler checks that apply given the sampler's tags."""
    tags = sampler._get_tags()
    yield check_target_type
    yield check_samplers_one_label
    yield check_samplers_fit
    yield check_samplers_fit_resample
    yield check_samplers_sampling_strategy_fit_resample
    # Input-type dependent checks are gated on the declared X_types.
    if "sparse" in tags["X_types"]:
        yield check_samplers_sparse
    if "dataframe" in tags["X_types"]:
        yield check_samplers_pandas
    if "string" in tags["X_types"]:
        yield check_samplers_string
    if tags["allow_nan"]:
        yield check_samplers_nan
    yield check_samplers_list
    yield check_samplers_multiclass_ova
    yield check_samplers_preserve_dtype
    # we don't filter samplers based on their tag here because we want to make
    # sure that the fitted attribute does not exist if the tag is not
    # stipulated
    yield check_samplers_sample_indices
    yield check_samplers_2d_target
    yield check_sampler_get_feature_names_out
    yield check_sampler_get_feature_names_out_pandas
def _yield_classifier_checks(classifier):
    """Yield the imblearn-specific checks for a classifier."""
    yield check_classifier_on_multilabel_or_multioutput_targets
    yield check_classifiers_with_encoded_labels
def _yield_all_checks(estimator):
    """Yield every applicable check for `estimator`, honoring `_skip_test`."""
    name = estimator.__class__.__name__
    tags = estimator._get_tags()
    if tags["_skip_test"]:
        warnings.warn(
            f"Explicit SKIP via _skip_test tag for estimator {name}.",
            SkipTestWarning,
        )
        return
    # trigger our checks if this is a SamplerMixin
    if hasattr(estimator, "fit_resample"):
        for check in _yield_sampler_checks(estimator):
            yield check
    if hasattr(estimator, "predict"):
        for check in _yield_classifier_checks(estimator):
            yield check
def parametrize_with_checks(estimators):
    """Pytest specific decorator for parametrizing estimator checks.

    The `id` of each check is set to be a pprint version of the estimator
    and the name of the check with its keyword arguments.
    This allows to use `pytest -k` to specify which tests to run::

        pytest test_check_estimators.py -k check_estimators_fit_returns_self

    Parameters
    ----------
    estimators : list of estimators instances
        Estimators to generated checks for.

    Returns
    -------
    decorator : `pytest.mark.parametrize`

    Examples
    --------
    >>> from sklearn.utils.estimator_checks import parametrize_with_checks
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.tree import DecisionTreeRegressor

    >>> @parametrize_with_checks([LogisticRegression(),
    ...                           DecisionTreeRegressor()])
    ... def test_sklearn_compatible_estimator(estimator, check):
    ...     check(estimator)
    """

    def checks_generator():
        for estimator in estimators:
            name = type(estimator).__name__
            for check in _yield_all_checks(estimator):
                # Bind the estimator name so the check id stays informative.
                check = partial(check, name)
                yield _maybe_mark_xfail(estimator, check, pytest)

    return pytest.mark.parametrize(
        "estimator, check", checks_generator(), ids=_get_check_estimator_ids
    )
def check_target_type(name, estimator_orig):
    """Check that samplers reject continuous and multilabel targets."""
    estimator = clone(estimator_orig)
    # should raise warning if the target is continuous (we cannot raise error)
    X = np.random.random((20, 2))
    y = np.linspace(0, 1, 20)
    msg = "Unknown label type:"
    assert_raises_regex(
        ValueError,
        msg,
        estimator.fit_resample,
        X,
        y,
    )
    # if the target is multilabel then we should raise an error
    rng = np.random.RandomState(42)
    y = rng.randint(2, size=(20, 3))
    msg = "Multilabel and multioutput targets are not supported."
    assert_raises_regex(
        ValueError,
        msg,
        estimator.fit_resample,
        X,
        y,
    )
def check_samplers_one_label(name, sampler_orig):
    """Check that a sampler raises a class-related error on one-class data."""
    sampler = clone(sampler_orig)
    error_string_fit = "Sampler can't balance when only one class is present."
    X = np.random.random((20, 2))
    y = np.zeros(20)
    try:
        sampler.fit_resample(X, y)
    except ValueError as e:
        # A ValueError mentioning "class" is the expected outcome.
        if "class" not in repr(e):
            print(error_string_fit, sampler.__class__.__name__, e)
            traceback.print_exc(file=sys.stdout)
            raise e
        else:
            return
    except Exception as exc:
        # Any other exception type means the check failed.
        print(error_string_fit, traceback, exc)
        traceback.print_exc(file=sys.stdout)
        raise exc
    # Not raising at all is also a failure.
    raise AssertionError(error_string_fit)
def check_samplers_fit(name, sampler_orig):
    """Check that resampling sets the fitted `sampling_strategy_` attribute."""
    sampler = clone(sampler_orig)
    np.random.seed(42)  # Make this test reproducible
    X = np.random.random((30, 2))
    y = np.array([1] * 20 + [0] * 10)
    sampler.fit_resample(X, y)
    assert hasattr(
        sampler, "sampling_strategy_"
    ), "No fitted attribute sampling_strategy_"
def check_samplers_fit_resample(name, sampler_orig):
    """Check the per-family invariants of the resampled class distribution.

    Over-samplers must bring every class at least up to the majority-class
    count; under-samplers down to the minority-class count (except
    ``InstanceHardnessThreshold``, which only approximates the target);
    cleaning samplers must strictly shrink every non-minority class.
    """
    sampler = clone(sampler_orig)
    X, y = sample_dataset_generator()
    target_stats = Counter(y)
    X_res, y_res = sampler.fit_resample(X, y)
    # Compute the resampled class distribution once instead of rebuilding a
    # Counter inside each assertion.
    target_stats_res = Counter(y_res)
    if isinstance(sampler, BaseOverSampler):
        n_samples = max(target_stats.values())
        assert all(value >= n_samples for value in target_stats_res.values())
    elif isinstance(sampler, BaseUnderSampler):
        n_samples = min(target_stats.values())
        if name == "InstanceHardnessThreshold":
            # IHT does not enforce the number of samples but provide a number
            # of samples the closest to the desired target.
            assert all(
                target_stats_res[k] <= target_stats[k] for k in target_stats.keys()
            )
        else:
            assert all(value == n_samples for value in target_stats_res.values())
    elif isinstance(sampler, BaseCleaningSampler):
        class_minority = min(target_stats, key=target_stats.get)
        assert all(
            target_stats[class_sample] > target_stats_res[class_sample]
            for class_sample in target_stats.keys()
            if class_sample != class_minority
        )
def check_samplers_sampling_strategy_fit_resample(name, sampler_orig):
    """Check that a dict/list `sampling_strategy` leaves class 1 untouched."""
    sampler = clone(sampler_orig)
    # in this test we will force all samplers to not change the class 1
    X, y = sample_dataset_generator()
    expected_stat = Counter(y)[1]
    if isinstance(sampler, BaseOverSampler):
        sampling_strategy = {2: 498, 0: 498}
        sampler.set_params(sampling_strategy=sampling_strategy)
        X_res, y_res = sampler.fit_resample(X, y)
        assert Counter(y_res)[1] == expected_stat
    elif isinstance(sampler, BaseUnderSampler):
        sampling_strategy = {2: 201, 0: 201}
        sampler.set_params(sampling_strategy=sampling_strategy)
        X_res, y_res = sampler.fit_resample(X, y)
        assert Counter(y_res)[1] == expected_stat
    elif isinstance(sampler, BaseCleaningSampler):
        # Cleaning samplers take a list of the classes to target.
        sampling_strategy = [2, 0]
        sampler.set_params(sampling_strategy=sampling_strategy)
        X_res, y_res = sampler.fit_resample(X, y)
        assert Counter(y_res)[1] == expected_stat
assert Counter(y_res)[1] == expected_stat
def check_samplers_sparse(name, sampler_orig):
    """Check that resampling a sparse matrix matches the dense result."""
    sampler = clone(sampler_orig)
    # check that sparse matrices can be passed through the sampler leading to
    # the same results than dense
    X, y = sample_dataset_generator()
    X_sparse = sparse.csr_matrix(X)
    X_res_sparse, y_res_sparse = sampler.fit_resample(X_sparse, y)
    sampler = clone(sampler)
    X_res, y_res = sampler.fit_resample(X, y)
    assert sparse.issparse(X_res_sparse)
    # Use `.toarray()` rather than the `.A` alias: the alias is deprecated
    # and absent from the sparse-array interface in recent SciPy versions.
    assert_allclose(X_res_sparse.toarray(), X_res, rtol=1e-5)
    assert_allclose(y_res_sparse, y_res)
def check_samplers_pandas(name, sampler_orig):
    """Check that samplers accept pandas containers and preserve their type."""
    pd = pytest.importorskip("pandas")
    sampler = clone(sampler_orig)
    # Check that the samplers handle pandas dataframe and pandas series
    X, y = sample_dataset_generator()
    X_df = pd.DataFrame(X, columns=[str(i) for i in range(X.shape[1])])
    y_df = pd.DataFrame(y)
    y_s = pd.Series(y, name="class")
    X_res_df, y_res_s = sampler.fit_resample(X_df, y_s)
    X_res_df, y_res_df = sampler.fit_resample(X_df, y_df)
    X_res, y_res = sampler.fit_resample(X, y)
    # check that we return the same type for dataframes or series types
    assert isinstance(X_res_df, pd.DataFrame)
    assert isinstance(y_res_df, pd.DataFrame)
    assert isinstance(y_res_s, pd.Series)
    # Column/series metadata must be preserved by resampling.
    assert X_df.columns.tolist() == X_res_df.columns.tolist()
    assert y_df.columns.tolist() == y_res_df.columns.tolist()
    assert y_s.name == y_res_s.name
    # `to_numpy` is the recommended accessor since pandas 0.25 (resolves the
    # previous FIXME about `.values`).
    assert_allclose(X_res_df.to_numpy(), X_res)
    assert_allclose(y_res_df.to_numpy().ravel(), y_res)
    assert_allclose(y_res_s.to_numpy(), y_res)
def check_samplers_list(name, sampler_orig):
    """Check that samplers accept plain Python lists and return lists."""
    sampler = clone(sampler_orig)
    # Check that the samplers can handle simple lists
    X, y = sample_dataset_generator()
    X_list = X.tolist()
    y_list = y.tolist()
    X_res, y_res = sampler.fit_resample(X, y)
    X_res_list, y_res_list = sampler.fit_resample(X_list, y_list)
    assert isinstance(X_res_list, list)
    assert isinstance(y_res_list, list)
    # Same content as when resampling the ndarray equivalents.
    assert_allclose(X_res, X_res_list)
    assert_allclose(y_res, y_res_list)
def check_samplers_multiclass_ova(name, sampler_orig):
    """Check multiclass targets behave like their one-vs-all encoding."""
    sampler = clone(sampler_orig)
    # Check that multiclass target lead to the same results than OVA encoding
    X, y = sample_dataset_generator()
    y_ova = label_binarize(y, classes=np.unique(y))
    X_res, y_res = sampler.fit_resample(X, y)
    X_res_ova, y_res_ova = sampler.fit_resample(X, y_ova)
    assert_allclose(X_res, X_res_ova)
    # The target type (e.g. multilabel-indicator) must be preserved.
    assert type_of_target(y_res_ova) == type_of_target(y_ova)
    assert_allclose(y_res, y_res_ova.argmax(axis=1))
def check_samplers_2d_target(name, sampler_orig):
    """Check that a column-vector target of shape (n, 1) is accepted."""
    sampler = clone(sampler_orig)
    X, y = sample_dataset_generator()
    y = y.reshape(-1, 1)  # Make the target 2d
    sampler.fit_resample(X, y)
def check_samplers_preserve_dtype(name, sampler_orig):
    """Check that non-default dtypes of X and y survive resampling."""
    sampler = clone(sampler_orig)
    X, y = sample_dataset_generator()
    # Cast X and y to not default dtype
    X = X.astype(np.float32)
    y = y.astype(np.int32)
    X_res, y_res = sampler.fit_resample(X, y)
    assert X.dtype == X_res.dtype, "X dtype is not preserved"
    assert y.dtype == y_res.dtype, "y dtype is not preserved"
assert y.dtype == y_res.dtype, "y dtype is not preserved"
def check_samplers_sample_indices(name, sampler_orig):
    """Check `sample_indices_` presence matches the `sample_indices` tag."""
    sampler = clone(sampler_orig)
    X, y = sample_dataset_generator()
    sampler.fit_resample(X, y)
    sample_indices = sampler._get_tags().get("sample_indices", None)
    if sample_indices:
        assert hasattr(sampler, "sample_indices_") is sample_indices
    else:
        # The attribute must not exist when the tag is absent or falsy.
        assert not hasattr(sampler, "sample_indices_")
def check_samplers_string(name, sampler_orig):
    """Check that samplers advertising string support handle object arrays."""
    rng = np.random.RandomState(0)
    sampler = clone(sampler_orig)
    categories = np.array(["A", "B", "C"], dtype=object)
    n_samples = 30
    X = rng.randint(low=0, high=3, size=n_samples).reshape(-1, 1)
    X = categories[X]
    y = rng.permutation([0] * 10 + [1] * 20)
    X_res, y_res = sampler.fit_resample(X, y)
    assert X_res.dtype == object
    assert X_res.shape[0] == y_res.shape[0]
    # Resampling must not invent values outside the original categories.
    assert_array_equal(np.unique(X_res.ravel()), categories)
def check_samplers_nan(name, sampler_orig):
    """Check that samplers with the `allow_nan` tag pass NaN values through."""
    rng = np.random.RandomState(0)
    sampler = clone(sampler_orig)
    categories = np.array([0, 1, np.nan], dtype=np.float64)
    n_samples = 100
    X = rng.randint(low=0, high=3, size=n_samples).reshape(-1, 1)
    X = categories[X]
    y = rng.permutation([0] * 40 + [1] * 60)
    X_res, y_res = sampler.fit_resample(X, y)
    assert X_res.dtype == np.float64
    assert X_res.shape[0] == y_res.shape[0]
    # Some NaN must survive resampling (they were present in every class mix).
    assert np.any(np.isnan(X_res.ravel()))
def check_classifier_on_multilabel_or_multioutput_targets(name, estimator_orig):
    """Check that classifiers reject multilabel/multioutput targets."""
    estimator = clone(estimator_orig)
    X, y = make_multilabel_classification(n_samples=30)
    msg = "Multilabel and multioutput targets are not supported."
    with pytest.raises(ValueError, match=msg):
        estimator.fit(X, y)
def check_classifiers_with_encoded_labels(name, classifier_orig):
    """Check that categorical (string) labels work end-to-end."""
    # Non-regression test for #709
    # https://github.com/scikit-learn-contrib/imbalanced-learn/issues/709
    pd = pytest.importorskip("pandas")
    classifier = clone(classifier_orig)
    iris = load_iris(as_frame=True)
    df, y = iris.data, iris.target
    y = pd.Series(iris.target_names[iris.target], dtype="category")
    df, y = make_imbalance(
        df,
        y,
        sampling_strategy={
            "setosa": 30,
            "versicolor": 20,
            "virginica": 50,
        },
    )
    # The sampling strategy is keyed on the string labels themselves.
    classifier.set_params(sampling_strategy={"setosa": 20, "virginica": 20})
    classifier.fit(df, y)
    assert set(classifier.classes_) == set(y.cat.categories.tolist())
    y_pred = classifier.predict(df)
    assert set(y_pred) == set(y.cat.categories.tolist())
def check_param_validation(name, estimator_orig):
    """Check that invalid constructor parameters raise informative errors."""
    # Check that an informative error is raised when the value of a constructor
    # parameter does not have an appropriate type or value.
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(20, 5))
    y = rng.randint(0, 2, size=20)
    y = _enforce_estimator_tags_y(estimator_orig, y)
    estimator_params = estimator_orig.get_params(deep=False).keys()
    # check that there is a constraint for each parameter
    if estimator_params:
        validation_params = estimator_orig._parameter_constraints.keys()
        unexpected_params = set(validation_params) - set(estimator_params)
        missing_params = set(estimator_params) - set(validation_params)
        err_msg = (
            f"Mismatch between _parameter_constraints and the parameters of {name}."
            f"\nConsider the unexpected parameters {unexpected_params} and expected but"
            f" missing parameters {missing_params}"
        )
        assert validation_params == estimator_params, err_msg
    # this object does not have a valid type for sure for all params
    param_with_bad_type = type("BadType", (), {})()
    fit_methods = ["fit", "partial_fit", "fit_transform", "fit_predict", "fit_resample"]
    for param_name in estimator_params:
        constraints = estimator_orig._parameter_constraints[param_name]
        if constraints == "no_validation":
            # This parameter is not validated
            continue  # pragma: no cover
        match = rf"The '{param_name}' parameter of {name} must be .* Got .* instead."
        err_msg = (
            f"{name} does not raise an informative error message when the "
            f"parameter {param_name} does not have a valid type or value."
        )
        estimator = clone(estimator_orig)
        # First, check that the error is raised if param doesn't match any valid type.
        estimator.set_params(**{param_name: param_with_bad_type})
        for method in fit_methods:
            if not hasattr(estimator, method):
                # the method is not accessible with the current set of parameters
                continue
            with raises(ValueError, match=match, err_msg=err_msg):
                if any(
                    isinstance(X_type, str) and X_type.endswith("labels")
                    for X_type in _safe_tags(estimator, key="X_types")
                ):
                    # The estimator is a label transformer and take only `y`
                    getattr(estimator, method)(y)  # pragma: no cover
                else:
                    getattr(estimator, method)(X, y)
        # Then, for constraints that are more than a type constraint, check that the
        # error is raised if param does match a valid type but does not match any valid
        # value for this type.
        constraints = [make_constraint(constraint) for constraint in constraints]
        for constraint in constraints:
            try:
                bad_value = generate_invalid_param_val(constraint)
            except NotImplementedError:
                # Not every constraint kind can generate an invalid value.
                continue
            estimator.set_params(**{param_name: bad_value})
            for method in fit_methods:
                if not hasattr(estimator, method):
                    # the method is not accessible with the current set of parameters
                    continue
                with raises(ValueError, match=match, err_msg=err_msg):
                    if any(
                        X_type.endswith("labels")
                        for X_type in _safe_tags(estimator, key="X_types")
                    ):
                        # The estimator is a label transformer and take only `y`
                        getattr(estimator, method)(y)  # pragma: no cover
                    else:
                        getattr(estimator, method)(X, y)
def check_dataframe_column_names_consistency(name, estimator_orig):
try:
import pandas as pd
except ImportError:
raise SkipTest(
"pandas is not installed: not checking column name consistency for pandas"
)
tags = _safe_tags(estimator_orig)
is_supported_X_types = (
"2darray" in tags["X_types"] or "categorical" in tags["X_types"]
)
if not is_supported_X_types or tags["no_validation"]:
return
rng = np.random.RandomState(0)
estimator = clone(estimator_orig)
set_random_state(estimator)
X_orig = rng.normal(size=(150, 8))
X_orig = _enforce_estimator_tags_x(estimator, X_orig)
n_samples, n_features = X_orig.shape
names = np.array([f"col_{i}" for i in range(n_features)])
X = pd.DataFrame(X_orig, columns=names)
if is_regressor(estimator):
y = rng.normal(size=n_samples)
else:
y = rng.randint(low=0, high=2, size=n_samples)
y = _enforce_estimator_tags_y(estimator, y)
# Check that calling `fit` does not raise any warnings about feature names.
with warnings.catch_warnings():
warnings.filterwarnings(
"error",
message="X does not have valid feature names",
category=UserWarning,
module="imblearn",
)
estimator.fit(X, y)
if not hasattr(estimator, "feature_names_in_"):
raise ValueError(
"Estimator does not have a feature_names_in_ "
"attribute after fitting with a dataframe"
)
assert isinstance(estimator.feature_names_in_, np.ndarray)
assert estimator.feature_names_in_.dtype == object
assert_array_equal(estimator.feature_names_in_, names)
# Only check imblearn estimators for feature_names_in_ in docstring
module_name = estimator_orig.__module__
if (
module_name.startswith("imblearn.")
and not ("test_" in module_name or module_name.endswith("_testing"))
and ("feature_names_in_" not in (estimator_orig.__doc__))
):
raise ValueError(
f"Estimator {name} does not document its feature_names_in_ attribute"
)
check_methods = []
for method in (
"predict",
"transform",
"decision_function",
"predict_proba",
"score",
"score_samples",
"predict_log_proba",
):
if not hasattr(estimator, method):
continue
callable_method = getattr(estimator, method)
if method == "score":
callable_method = partial(callable_method, y=y)
check_methods.append((method, callable_method))
for _, method in check_methods:
with warnings.catch_warnings():
warnings.filterwarnings(
"error",
message="X does not have valid feature names",
category=UserWarning,
module="sklearn",
)
method(X) # works without UserWarning for valid features
invalid_names = [
(names[::-1], "Feature names must be in the same order as they were in fit."),
(
[f"another_prefix_{i}" for i in range(n_features)],
"Feature names unseen at fit time:\n- another_prefix_0\n-"
" another_prefix_1\n",
),
(
names[:3],
f"Feature names seen at fit time, yet now missing:\n- {min(names[3:])}\n",
),
]
params = {
key: value
for key, value in estimator.get_params().items()
if "early_stopping" in key
}
early_stopping_enabled = any(value is True for value in params.values())
for invalid_name, additional_message in invalid_names:
X_bad = pd.DataFrame(X, columns=invalid_name)
for name, method in check_methods:
if sklearn_version >= parse_version("1.2"):
expected_msg = re.escape(
"The feature names should match those that were passed during fit."
f"\n{additional_message}"
)
with raises(
ValueError, match=expected_msg, err_msg=f"{name} did not raise"
):
method(X_bad)
else:
expected_msg = re.escape(
"The feature names should match those that were passed "
"during fit. Starting version 1.2, an error will be raised.\n"
f"{additional_message}"
)
with warnings.catch_warnings():
warnings.filterwarnings(
"error",
category=FutureWarning,
module="sklearn",
)
with raises(
FutureWarning,
match=expected_msg,
err_msg=f"{name} did not raise",
):
method(X_bad)
# partial_fit checks on second call
# Do not call partial fit if early_stopping is on
if not hasattr(estimator, "partial_fit") or early_stopping_enabled:
continue
estimator = clone(estimator_orig)
if is_classifier(estimator):
classes = np.unique(y)
estimator.partial_fit(X, y, classes=classes)
else:
estimator.partial_fit(X, y)
with raises(ValueError, match=expected_msg):
estimator.partial_fit(X_bad, y)
def check_sampler_get_feature_names_out(name, sampler_orig):
    """Check `get_feature_names_out` of a sampler fitted on a plain ndarray."""
    tags = sampler_orig._get_tags()
    if "2darray" not in tags["X_types"] or tags["no_validation"]:
        return

    X, y = make_blobs(
        n_samples=30,
        centers=[[0, 0, 0], [1, 1, 1]],
        random_state=0,
        n_features=2,
        cluster_std=0.1,
    )
    X = StandardScaler().fit_transform(X)

    sampler = clone(sampler_orig)
    X = _enforce_estimator_tags_x(sampler, X)
    set_random_state(sampler)
    X_res, y_res = sampler.fit_resample(X, y=y)

    n_features = X.shape[1]
    input_features = [f"feature{i}" for i in range(n_features)]

    # `input_features` shorter than `n_features_in_` must be rejected.
    with raises(ValueError, match="input_features should have length equal"):
        sampler.get_feature_names_out(input_features[::2])

    names_out = sampler.get_feature_names_out(input_features)
    assert names_out is not None
    assert isinstance(names_out, np.ndarray)
    assert names_out.dtype == object
    assert all(isinstance(feature_name, str) for feature_name in names_out)

    n_features_out = X_res.shape[1]
    assert (
        len(names_out) == n_features_out
    ), f"Expected {n_features_out} feature names, got {len(names_out)}"
def check_sampler_get_feature_names_out_pandas(name, sampler_orig):
    """Check `get_feature_names_out` of a sampler fitted on a pandas DataFrame."""
    try:
        import pandas as pd
    except ImportError:
        raise SkipTest(
            "pandas is not installed: not checking column name consistency for pandas"
        )

    tags = sampler_orig._get_tags()
    if "2darray" not in tags["X_types"] or tags["no_validation"]:
        return

    X, y = make_blobs(
        n_samples=30,
        centers=[[0, 0, 0], [1, 1, 1]],
        random_state=0,
        n_features=2,
        cluster_std=0.1,
    )
    X = StandardScaler().fit_transform(X)

    sampler = clone(sampler_orig)
    X = _enforce_estimator_tags_x(sampler, X)
    set_random_state(sampler)

    n_features = X.shape[1]
    feature_names_in = [f"col{i}" for i in range(n_features)]
    df = pd.DataFrame(X, columns=feature_names_in)
    X_res, y_res = sampler.fit_resample(df, y=y)

    # `input_features` inconsistent with the columns seen in `fit_resample`
    # must be rejected.
    bad_feature_names = [f"bad{i}" for i in range(n_features)]
    with raises(ValueError, match="input_features is not equal to feature_names_in_"):
        sampler.get_feature_names_out(bad_feature_names)

    # Passing the fitted names explicitly must match the default output.
    default_names = sampler.get_feature_names_out()
    explicit_names = sampler.get_feature_names_out(feature_names_in)
    assert_array_equal(default_names, explicit_names)

    n_features_out = X_res.shape[1]
    assert (
        len(default_names) == n_features_out
    ), f"Expected {n_features_out} feature names, got {len(default_names)}"
| 28,070 | 34.08875 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/_validation.py | """Utilities for input validation"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import warnings
from collections import OrderedDict
from functools import wraps
from inspect import Parameter, signature
from numbers import Integral, Real
import numpy as np
from sklearn.base import clone
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array, column_or_1d
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from .fixes import _is_pandas_df
# Sampler kinds understood by `check_sampling_strategy`; "ensemble" and
# "bypass" pass the strategy through untouched.
SAMPLING_KIND = (
    "over-sampling",
    "under-sampling",
    "clean-sampling",
    "ensemble",
    "bypass",
)
# Target types supported by the samplers (see `check_target_type`).
TARGET_KIND = ("binary", "multiclass", "multilabel-indicator")
class ArraysTransformer:
    """A class to convert sampler output arrays to their original types."""

    def __init__(self, X, y):
        # Record container metadata before resampling turns X/y into arrays.
        self.x_props = self._gets_props(X)
        self.y_props = self._gets_props(y)

    def transform(self, X, y):
        X = self._transfrom_one(X, self.x_props)
        y = self._transfrom_one(y, self.y_props)
        x_is_frame = self.x_props["type"].lower() == "dataframe"
        y_is_pandas = self.y_props["type"].lower() in ("series", "dataframe")
        if x_is_frame and y_is_pandas:
            # We lost the y.index during resampling. We can safely use X.index
            # to align them.
            y.index = X.index
        return X, y

    def _gets_props(self, array):
        # Attributes missing on the container (e.g. on plain ndarrays) are None.
        return {
            "type": array.__class__.__name__,
            "columns": getattr(array, "columns", None),
            "name": getattr(array, "name", None),
            "dtypes": getattr(array, "dtypes", None),
        }

    def _transfrom_one(self, array, props):
        kind = props["type"].lower()
        if kind == "list":
            return array.tolist()
        if kind == "dataframe":
            import pandas as pd

            frame = pd.DataFrame(array, columns=props["columns"])
            return frame.astype(props["dtypes"])
        if kind == "series":
            import pandas as pd

            return pd.Series(array, dtype=props["dtypes"], name=props["name"])
        # Unknown container type: return the array unchanged.
        return array
def _is_neighbors_object(estimator):
"""Check that the estimator exposes a KNeighborsMixin-like API.
A KNeighborsMixin-like API exposes the following methods: (i) `kneighbors`,
(ii) `kneighbors_graph`.
Parameters
----------
estimator : object
A scikit-learn compatible estimator.
Returns
-------
is_neighbors_object : bool
True if the estimator exposes a KNeighborsMixin-like API.
"""
neighbors_attributes = ["kneighbors", "kneighbors_graph"]
return all(hasattr(estimator, attr) for attr in neighbors_attributes)
def check_neighbors_object(nn_name, nn_object, additional_neighbor=0):
    """Check the objects is consistent to be a k nearest neighbors.

    Several methods in `imblearn` relies on k nearest neighbors. These objects
    can be passed at initialisation as an integer or as an object that has
    KNeighborsMixin-like attributes. This utility will create or clone said
    object, ensuring it is KNeighbors-like.

    Parameters
    ----------
    nn_name : str
        The name associated to the object to raise an error if needed.

    nn_object : int or KNeighborsMixin
        The object to be checked.

    additional_neighbor : int, default=0
        Sometimes, some algorithm need an additional neighbors.

    Returns
    -------
    nn_object : KNeighborsMixin
        The k-NN object.

    Raises
    ------
    ValueError
        If `nn_object` is neither an integer nor an object exposing the
        KNeighborsMixin API (`kneighbors` and `kneighbors_graph`).
    """
    if isinstance(nn_object, Integral):
        return NearestNeighbors(n_neighbors=nn_object + additional_neighbor)
    # Bug fix: this validation was commented out, so an incompatible object was
    # silently cloned and only failed later with an obscure error.
    if not _is_neighbors_object(nn_object):
        raise ValueError(
            f"{nn_name} must be an integer or an object compatible with the "
            "KNeighborsMixin API of scikit-learn, i.e. exposing both "
            "`kneighbors` and `kneighbors_graph` methods."
        )
    return clone(nn_object)
def _count_class_sample(y):
unique, counts = np.unique(y, return_counts=True)
return dict(zip(unique, counts))
def check_target_type(y, indicate_one_vs_all=False):
    """Check the target types to be conform to the current samplers.

    The current samplers should be compatible with ``'binary'``,
    ``'multilabel-indicator'`` and ``'multiclass'`` targets only.

    Parameters
    ----------
    y : ndarray
        The array containing the target.

    indicate_one_vs_all : bool, default=False
        Either to indicate if the targets are encoded in a one-vs-all fashion.

    Returns
    -------
    y : ndarray
        The returned target.

    is_one_vs_all : bool, optional
        Indicate if the target was originally encoded in a one-vs-all fashion.
        Only returned if ``indicate_multilabel=True``.
    """
    is_one_vs_all = type_of_target(y) == "multilabel-indicator"
    if is_one_vs_all:
        # A valid one-vs-all encoding has at most one positive label per row.
        if np.any(y.sum(axis=1) > 1):
            raise ValueError(
                "Imbalanced-learn currently supports binary, multiclass and "
                "binarized encoded multiclasss targets. Multilabel and "
                "multioutput targets are not supported."
            )
        # Collapse the indicator matrix back to a 1D multiclass vector.
        y = y.argmax(axis=1)
    else:
        y = column_or_1d(y)
    return (y, is_one_vs_all) if indicate_one_vs_all else y
def _sampling_strategy_all(y, sampling_type):
    """Returns sampling target by targeting all classes."""
    target_stats = _count_class_sample(y)
    if sampling_type == "over-sampling":
        # Number of samples to generate so every class reaches the majority.
        n_majority = max(target_stats.values())
        return {klass: n_majority - count for klass, count in target_stats.items()}
    if sampling_type in ("under-sampling", "clean-sampling"):
        # Number of samples to keep so every class shrinks to the minority.
        n_minority = min(target_stats.values())
        return {klass: n_minority for klass in target_stats}
    raise NotImplementedError
def _sampling_strategy_majority(y, sampling_type):
    """Returns sampling target by targeting the majority class only."""
    if sampling_type == "over-sampling":
        raise ValueError(
            "'sampling_strategy'='majority' cannot be used with over-sampler."
        )
    if sampling_type in ("under-sampling", "clean-sampling"):
        target_stats = _count_class_sample(y)
        majority_class = max(target_stats, key=target_stats.get)
        n_minority = min(target_stats.values())
        # Shrink only the majority class down to the minority count.
        return {majority_class: n_minority}
    raise NotImplementedError
def _sampling_strategy_not_majority(y, sampling_type):
    """Returns sampling target by targeting all classes but not the
    majority."""
    target_stats = _count_class_sample(y)
    majority_class = max(target_stats, key=target_stats.get)
    if sampling_type == "over-sampling":
        # Grow every non-majority class up to the majority count.
        n_majority = max(target_stats.values())
        return {
            klass: n_majority - count
            for klass, count in target_stats.items()
            if klass != majority_class
        }
    if sampling_type in ("under-sampling", "clean-sampling"):
        # Shrink every non-majority class down to the minority count.
        n_minority = min(target_stats.values())
        return {klass: n_minority for klass in target_stats if klass != majority_class}
    raise NotImplementedError
def _sampling_strategy_not_minority(y, sampling_type):
    """Returns sampling target by targeting all classes but not the
    minority."""
    target_stats = _count_class_sample(y)
    minority_class = min(target_stats, key=target_stats.get)
    if sampling_type == "over-sampling":
        # Grow every non-minority class up to the majority count.
        n_majority = max(target_stats.values())
        return {
            klass: n_majority - count
            for klass, count in target_stats.items()
            if klass != minority_class
        }
    if sampling_type in ("under-sampling", "clean-sampling"):
        # Shrink every non-minority class down to the minority count.
        n_minority = min(target_stats.values())
        return {klass: n_minority for klass in target_stats if klass != minority_class}
    raise NotImplementedError
def _sampling_strategy_minority(y, sampling_type):
    """Returns sampling target by targeting the minority class only."""
    target_stats = _count_class_sample(y)
    if sampling_type == "over-sampling":
        # Grow only the minority class up to the majority count.
        n_majority = max(target_stats.values())
        minority_class = min(target_stats, key=target_stats.get)
        return {
            klass: n_majority - count
            for klass, count in target_stats.items()
            if klass == minority_class
        }
    if sampling_type in ("under-sampling", "clean-sampling"):
        raise ValueError(
            "'sampling_strategy'='minority' cannot be used with"
            " under-sampler and clean-sampler."
        )
    raise NotImplementedError
def _sampling_strategy_auto(y, sampling_type):
    """Returns sampling target auto for over-sampling and not-minority for
    under-sampling."""
    if sampling_type == "over-sampling":
        return _sampling_strategy_not_majority(y, sampling_type)
    elif sampling_type == "under-sampling" or sampling_type == "clean-sampling":
        return _sampling_strategy_not_minority(y, sampling_type)
    # Bug fix: previously an unexpected `sampling_type` fell through and
    # returned None silently; raise like the other _sampling_strategy_* helpers.
    raise NotImplementedError
def _sampling_strategy_dict(sampling_strategy, y, sampling_type):
    """Validate and convert a ``{class: n_samples}`` dict into sampling targets.

    For over-sampling the returned values are the number of samples to
    *generate* per class; for under-sampling they are the number of samples to
    *keep* per class.
    """
    target_stats = _count_class_sample(y)
    # check that all keys in sampling_strategy are also in y
    set_diff_sampling_strategy_target = set(sampling_strategy.keys()) - set(
        target_stats.keys()
    )
    if len(set_diff_sampling_strategy_target) > 0:
        raise ValueError(
            f"The {set_diff_sampling_strategy_target} target class is/are not "
            f"present in the data."
        )
    # check that there is no negative number
    if any(n_samples < 0 for n_samples in sampling_strategy.values()):
        raise ValueError(
            f"The number of samples in a class cannot be negative."
            f"'sampling_strategy' contains some negative value: {sampling_strategy}"
        )
    sampling_strategy_ = {}
    if sampling_type == "over-sampling":
        # NOTE: two dead statements whose results were discarded
        # (`max(target_stats.values())` and `max(target_stats, key=...)`)
        # were removed from this branch.
        for class_sample, n_samples in sampling_strategy.items():
            if n_samples < target_stats[class_sample]:
                raise ValueError(
                    f"With over-sampling methods, the number"
                    f" of samples in a class should be greater"
                    f" or equal to the original number of samples."
                    f" Originally, there is {target_stats[class_sample]} "
                    f"samples and {n_samples} samples are asked."
                )
            sampling_strategy_[class_sample] = n_samples - target_stats[class_sample]
    elif sampling_type == "under-sampling":
        for class_sample, n_samples in sampling_strategy.items():
            if n_samples > target_stats[class_sample]:
                raise ValueError(
                    f"With under-sampling methods, the number of"
                    f" samples in a class should be less or equal"
                    f" to the original number of samples."
                    f" Originally, there is {target_stats[class_sample]} "
                    f"samples and {n_samples} samples are asked."
                )
            sampling_strategy_[class_sample] = n_samples
    elif sampling_type == "clean-sampling":
        raise ValueError(
            "'sampling_strategy' as a dict for cleaning methods is "
            "not supported. Please give a list of the classes to be "
            "targeted by the sampling."
        )
    else:
        raise NotImplementedError

    return sampling_strategy_
def _sampling_strategy_list(sampling_strategy, y, sampling_type):
    """With cleaning methods, sampling_strategy can be a list to target the
    class of interest."""
    if sampling_type != "clean-sampling":
        raise ValueError(
            "'sampling_strategy' cannot be a list for samplers "
            "which are not cleaning methods."
        )
    target_stats = _count_class_sample(y)
    # Every listed class must be present in `y`.
    unknown_targets = set(sampling_strategy) - set(target_stats.keys())
    if unknown_targets:
        raise ValueError(
            f"The {unknown_targets} target class is/are not "
            f"present in the data."
        )
    n_minority = min(target_stats.values())
    return {klass: n_minority for klass in sampling_strategy}
def _sampling_strategy_float(sampling_strategy, y, sampling_type):
    """Take a proportion of the majority (over-sampling) or minority
    (under-sampling) class in binary classification."""
    if type_of_target(y) != "binary":
        raise ValueError(
            '"sampling_strategy" can be a float only when the type '
            "of target is binary. For multi-class, use a dict."
        )
    target_stats = _count_class_sample(y)
    if sampling_type == "over-sampling":
        n_majority = max(target_stats.values())
        majority_class = max(target_stats, key=target_stats.get)
        # Grow the minority class towards `ratio * n_majority` samples.
        sampling_strategy_ = {
            klass: int(n_majority * sampling_strategy - count)
            for klass, count in target_stats.items()
            if klass != majority_class
        }
        if any(n_samples <= 0 for n_samples in sampling_strategy_.values()):
            raise ValueError(
                "The specified ratio required to remove samples "
                "from the minority class while trying to "
                "generate new samples. Please increase the "
                "ratio."
            )
    elif sampling_type == "under-sampling":
        n_minority = min(target_stats.values())
        minority_class = min(target_stats, key=target_stats.get)
        # Shrink the majority class towards `n_minority / ratio` samples.
        sampling_strategy_ = {
            klass: int(n_minority / sampling_strategy)
            for klass in target_stats
            if klass != minority_class
        }
        if any(
            n_samples > target_stats[klass]
            for klass, n_samples in sampling_strategy_.items()
        ):
            raise ValueError(
                "The specified ratio required to generate new "
                "sample in the majority class while trying to "
                "remove samples. Please increase the ratio."
            )
    else:
        raise ValueError(
            "'clean-sampling' methods do let the user specify the sampling ratio."
        )
    return sampling_strategy_
def check_sampling_strategy(sampling_strategy, y, sampling_type, **kwargs):
    """Sampling target validation for samplers.

    Checks that ``sampling_strategy`` is of consistent type and return a
    dictionary containing each targeted class with its corresponding
    number of sample. It is used in :class:`~imblearn.base.BaseSampler`.

    Parameters
    ----------
    sampling_strategy : float, str, dict, list or callable,
        Sampling information to sample the data set.

        - When ``float``:

            For **under-sampling methods**, it corresponds to the ratio
            :math:`\\alpha_{us}` defined by :math:`N_{rM} = \\alpha_{us}
            \\times N_{m}` where :math:`N_{rM}` and :math:`N_{m}` are the
            number of samples in the majority class after resampling and the
            number of samples in the minority class, respectively;

            For **over-sampling methods**, it correspond to the ratio
            :math:`\\alpha_{os}` defined by :math:`N_{rm} = \\alpha_{os}
            \\times N_{m}` where :math:`N_{rm}` and :math:`N_{M}` are the
            number of samples in the minority class after resampling and the
            number of samples in the majority class, respectively.

            .. warning::
               ``float`` is only available for **binary** classification. An
               error is raised for multi-class classification and with cleaning
               samplers.

        - When ``str``, specify the class targeted by the resampling. For
          **under- and over-sampling methods**, the number of samples in the
          different classes will be equalized. For **cleaning methods**, the
          number of samples will not be equal. Possible choices are:

            ``'minority'``: resample only the minority class;

            ``'majority'``: resample only the majority class;

            ``'not minority'``: resample all classes but the minority class;

            ``'not majority'``: resample all classes but the majority class;

            ``'all'``: resample all classes;

            ``'auto'``: for under-sampling methods, equivalent to ``'not
            minority'`` and for over-sampling methods, equivalent to ``'not
            majority'``.

        - When ``dict``, the keys correspond to the targeted classes. The
          values correspond to the desired number of samples for each targeted
          class.

          .. warning::
             ``dict`` is available for both **under- and over-sampling
             methods**. An error is raised with **cleaning methods**. Use a
             ``list`` instead.

        - When ``list``, the list contains the targeted classes. It used only
          for **cleaning methods**.

          .. warning::
             ``list`` is available for **cleaning methods**. An error is raised
             with **under- and over-sampling methods**.

        - When callable, function taking ``y`` and returns a ``dict``. The keys
          correspond to the targeted classes. The values correspond to the
          desired number of samples for each class.

    y : ndarray of shape (n_samples,)
        The target array.

    sampling_type : {{'over-sampling', 'under-sampling', 'clean-sampling'}}
        The type of sampling. Can be either ``'over-sampling'``,
        ``'under-sampling'``, or ``'clean-sampling'``.

    **kwargs : dict
        Dictionary of additional keyword arguments to pass to
        ``sampling_strategy`` when this is a callable.

    Returns
    -------
    sampling_strategy_converted : dict
        The converted and validated sampling target. Returns a dictionary with
        the key being the class target and the value being the desired
        number of samples.
    """
    if sampling_type not in SAMPLING_KIND:
        # Bug fix: the closing quote after the interpolated value was missing.
        raise ValueError(
            f"'sampling_type' should be one of {SAMPLING_KIND}. "
            f"Got '{sampling_type}' instead."
        )

    if np.unique(y).size <= 1:
        raise ValueError(
            f"The target 'y' needs to have more than 1 class. "
            f"Got {np.unique(y).size} class instead"
        )

    if sampling_type in ("ensemble", "bypass"):
        # Ensemble and bypass samplers forward the strategy untouched.
        return sampling_strategy

    if isinstance(sampling_strategy, str):
        if sampling_strategy not in SAMPLING_TARGET_KIND.keys():
            raise ValueError(
                f"When 'sampling_strategy' is a string, it needs"
                f" to be one of {SAMPLING_TARGET_KIND}. Got '{sampling_strategy}' "
                f"instead."
            )
        return OrderedDict(
            sorted(SAMPLING_TARGET_KIND[sampling_strategy](y, sampling_type).items())
        )
    elif isinstance(sampling_strategy, dict):
        return OrderedDict(
            sorted(_sampling_strategy_dict(sampling_strategy, y, sampling_type).items())
        )
    elif isinstance(sampling_strategy, list):
        return OrderedDict(
            sorted(_sampling_strategy_list(sampling_strategy, y, sampling_type).items())
        )
    elif isinstance(sampling_strategy, Real):
        if sampling_strategy <= 0 or sampling_strategy > 1:
            raise ValueError(
                f"When 'sampling_strategy' is a float, it should be "
                f"in the range (0, 1]. Got {sampling_strategy} instead."
            )
        return OrderedDict(
            sorted(
                _sampling_strategy_float(sampling_strategy, y, sampling_type).items()
            )
        )
    elif callable(sampling_strategy):
        sampling_strategy_ = sampling_strategy(y, **kwargs)
        return OrderedDict(
            sorted(
                _sampling_strategy_dict(sampling_strategy_, y, sampling_type).items()
            )
        )
    # Bug fix: previously an unsupported type fell through and returned None
    # silently; fail loudly instead.
    raise ValueError(
        f"'sampling_strategy' has an unsupported type: "
        f"{type(sampling_strategy).__name__!r}."
    )
# Mapping from the string aliases accepted by `sampling_strategy` to the
# helper computing the per-class sampling target.
SAMPLING_TARGET_KIND = {
    "minority": _sampling_strategy_minority,
    "majority": _sampling_strategy_majority,
    "not minority": _sampling_strategy_not_minority,
    "not majority": _sampling_strategy_not_majority,
    "all": _sampling_strategy_all,
    "auto": _sampling_strategy_auto,
}
def _deprecate_positional_args(f):
"""Decorator for methods that issues warnings for positional arguments
Using the keyword-only argument syntax in pep 3102, arguments after the
* will issue a warning when passed as a positional argument.
Parameters
----------
f : function
function to check arguments on.
"""
sig = signature(f)
kwonly_args = []
all_args = []
for name, param in sig.parameters.items():
if param.kind == Parameter.POSITIONAL_OR_KEYWORD:
all_args.append(name)
elif param.kind == Parameter.KEYWORD_ONLY:
kwonly_args.append(name)
@wraps(f)
def inner_f(*args, **kwargs):
extra_args = len(args) - len(all_args)
if extra_args > 0:
# ignore first 'self' argument for instance methods
args_msg = [
f"{name}={arg}"
for name, arg in zip(kwonly_args[:extra_args], args[-extra_args:])
]
warnings.warn(
f"Pass {', '.join(args_msg)} as keyword args. From version 0.9 "
f"passing these as positional arguments will "
f"result in an error",
FutureWarning,
)
kwargs.update({k: arg for k, arg in zip(sig.parameters, args)})
return f(**kwargs)
return inner_f
def _check_X(X):
    """Check X and do not check it if a dataframe."""
    n_samples = _num_samples(X)
    if n_samples < 1:
        raise ValueError(
            f"Found array with {n_samples} sample(s) while a minimum of 1 is "
            "required."
        )
    if _is_pandas_df(X):
        # Dataframes are passed through untouched (no dtype conversion).
        return X
    # Accept sparse input and any dtype; non-finite values are tolerated here.
    return check_array(
        X,
        dtype=None,
        accept_sparse=["csr", "csc"],
        force_all_finite=False,
    )
| 22,939 | 35.586922 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/_available_if.py | """This is a copy of sklearn/utils/_available_if.py. It can be removed when
we support scikit-learn >= 1.1.
"""
# mypy: ignore-errors
from functools import update_wrapper, wraps
from types import MethodType
import sklearn
from sklearn.utils import parse_version
sklearn_version = parse_version(sklearn.__version__)
if sklearn_version < parse_version("1.1"):
    # Vendored backport of `sklearn.utils.metaestimators.available_if` for
    # scikit-learn < 1.1; newer versions re-export the upstream implementation.

    class _AvailableIfDescriptor:
        """Implements a conditional property using the descriptor protocol.

        Using this class to create a decorator will raise an ``AttributeError``
        if check(self) returns a falsey value. Note that if check raises an error
        this will also result in hasattr returning false.

        See https://docs.python.org/3/howto/descriptor.html for an explanation of
        descriptors.
        """

        def __init__(self, fn, check, attribute_name):
            self.fn = fn
            self.check = check
            self.attribute_name = attribute_name

            # update the docstring of the descriptor
            update_wrapper(self, fn)

        def __get__(self, obj, owner=None):
            attr_err = AttributeError(
                f"This {owner.__name__!r} has no attribute {self.attribute_name!r}"
            )
            if obj is not None:
                # delegate only on instances, not the classes.
                # this is to allow access to the docstrings.
                if not self.check(obj):
                    raise attr_err
                out = MethodType(self.fn, obj)
            else:
                # This makes it possible to use the decorated method as an
                # unbound method, for instance when monkeypatching.
                @wraps(self.fn)
                def out(*args, **kwargs):
                    if not self.check(args[0]):
                        raise attr_err
                    return self.fn(*args, **kwargs)

            return out

    def available_if(check):
        """An attribute that is available only if check returns a truthy value.

        Parameters
        ----------
        check : callable
            When passed the object with the decorated method, this should return
            a truthy value if the attribute is available, and either return False
            or raise an AttributeError if not available.

        Returns
        -------
        callable
            Callable makes the decorated method available if `check` returns
            a truthy value, otherwise the decorated method is unavailable.

        Examples
        --------
        >>> from sklearn.utils.metaestimators import available_if
        >>> class HelloIfEven:
        ...     def __init__(self, x):
        ...         self.x = x
        ...
        ...     def _x_is_even(self):
        ...         return self.x % 2 == 0
        ...
        ...     @available_if(_x_is_even)
        ...     def say_hello(self):
        ...         print("Hello")
        ...
        >>> obj = HelloIfEven(1)
        >>> hasattr(obj, "say_hello")
        False
        >>> obj.x = 2
        >>> hasattr(obj, "say_hello")
        True
        >>> obj.say_hello()
        Hello
        """
        return lambda fn: _AvailableIfDescriptor(fn, check, attribute_name=fn.__name__)

else:
    from sklearn.utils.metaestimators import available_if  # noqa
| 3,316 | 32.17 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/__init__.py | """
The :mod:`imblearn.utils` module includes various utilities.
"""
from ._docstring import Substitution
from ._validation import (
check_neighbors_object,
check_sampling_strategy,
check_target_type,
)
# Names exported as the public API of `imblearn.utils`.
__all__ = [
    "check_neighbors_object",
    "check_sampling_strategy",
    "check_target_type",
    "Substitution",
]
| 337 | 17.777778 | 60 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/fixes.py | """Compatibility fixes for older version of python, numpy, scipy, and
scikit-learn.
If you add content to this file, please give the version of the package at
which the fix is no longer needed.
"""
import functools
import sys
import numpy as np
import scipy
import scipy.stats
import sklearn
from sklearn.utils.fixes import parse_version
from .._config import config_context, get_config
# Parsed versions of the installed dependencies, used by the shims below.
sp_version = parse_version(scipy.__version__)
sklearn_version = parse_version(sklearn.__version__)
# TODO: Remove when SciPy 1.9 is the minimum supported version
def _mode(a, axis=0):
    """Call `scipy.stats.mode` with consistent output across SciPy versions."""
    if sp_version < parse_version("1.9.0"):
        return scipy.stats.mode(a, axis=axis)
    # SciPy >= 1.9 needs `keepdims=True` to reproduce the pre-1.9 output shape.
    return scipy.stats.mode(a, axis=axis, keepdims=True)
# TODO: Remove when scikit-learn 1.1 is the minimum supported version
if sklearn_version >= parse_version("1.1"):
    from sklearn.utils.validation import _is_arraylike_not_scalar
else:
    from sklearn.utils.validation import _is_arraylike

    def _is_arraylike_not_scalar(array):
        """Return True if array is array-like and not a scalar"""
        # Backport: older scikit-learn only provides `_is_arraylike`.
        return _is_arraylike(array) and not np.isscalar(array)
# TODO: remove when scikit-learn minimum version is 1.3
if sklearn_version < parse_version("1.3"):
    # Vendored backport of `sklearn.base._fit_context` for scikit-learn < 1.3.

    def _fit_context(*, prefer_skip_nested_validation):
        """Decorator to run the fit methods of estimators within context managers.

        Parameters
        ----------
        prefer_skip_nested_validation : bool
            If True, the validation of parameters of inner estimators or functions
            called during fit will be skipped.

            This is useful to avoid validating many times the parameters passed by the
            user from the public facing API. It's also useful to avoid validating
            parameters that we pass internally to inner functions that are guaranteed to
            be valid by the test suite.

            It should be set to True for most estimators, except for those that receive
            non-validated objects as parameters, such as meta-estimators that are given
            estimator objects.

        Returns
        -------
        decorated_fit : method
            The decorated fit method.
        """

        def decorator(fit_method):
            @functools.wraps(fit_method)
            def wrapper(estimator, *args, **kwargs):
                global_skip_validation = get_config()["skip_parameter_validation"]

                # we don't want to validate again for each call to partial_fit
                partial_fit_and_fitted = (
                    fit_method.__name__ == "partial_fit" and _is_fitted(estimator)
                )

                if not global_skip_validation and not partial_fit_and_fitted:
                    estimator._validate_params()

                # Nested estimators skip their own parameter validation when
                # `prefer_skip_nested_validation` (or the global flag) is set.
                with config_context(
                    skip_parameter_validation=(
                        prefer_skip_nested_validation or global_skip_validation
                    )
                ):
                    return fit_method(estimator, *args, **kwargs)

            return wrapper

        return decorator

else:
    from sklearn.base import _fit_context  # type: ignore[no-redef] # noqa
# TODO: remove when scikit-learn minimum version is 1.3
if sklearn_version < parse_version("1.3"):
    # Vendored backport of `sklearn.utils.validation._is_fitted`.

    def _is_fitted(estimator, attributes=None, all_or_any=all):
        """Determine if an estimator is fitted

        Parameters
        ----------
        estimator : estimator instance
            Estimator instance for which the check is performed.

        attributes : str, list or tuple of str, default=None
            Attribute name(s) given as string or a list/tuple of strings
            Eg.: ``["coef_", "estimator_", ...], "coef_"``

            If `None`, `estimator` is considered fitted if there exist an
            attribute that ends with a underscore and does not start with double
            underscore.

        all_or_any : callable, {all, any}, default=all
            Specify whether all or any of the given attributes must exist.

        Returns
        -------
        fitted : bool
            Whether the estimator is fitted.
        """
        if attributes is not None:
            if not isinstance(attributes, (list, tuple)):
                attributes = [attributes]
            return all_or_any([hasattr(estimator, attr) for attr in attributes])

        if hasattr(estimator, "__sklearn_is_fitted__"):
            return estimator.__sklearn_is_fitted__()

        # Heuristic: look for at least one attribute ending with "_" (and not
        # starting with "__") on the instance.
        fitted_attrs = [
            v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")
        ]
        return len(fitted_attrs) > 0

else:
    from sklearn.utils.validation import _is_fitted  # type: ignore[no-redef]
try:
    from sklearn.utils.validation import _is_pandas_df
except ImportError:
    # Backport for scikit-learn versions that do not ship `_is_pandas_df`.

    def _is_pandas_df(X):
        """Return True if the X is a pandas dataframe."""
        if hasattr(X, "columns") and hasattr(X, "iloc"):
            # Likely a pandas DataFrame, we explicitly check the type to confirm.
            try:
                # Look pandas up in sys.modules to avoid importing it when the
                # caller never did.
                pd = sys.modules["pandas"]
            except KeyError:
                return False
            return isinstance(X, pd.DataFrame)
        return False
| 5,221 | 33.582781 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/_param_validation.py | """This is a copy of sklearn/utils/_param_validation.py. It can be removed when
we support scikit-learn >= 1.2.
"""
# mypy: ignore-errors
import functools
import math
import operator
import re
import warnings
from abc import ABC, abstractmethod
from collections.abc import Iterable
from inspect import signature
from numbers import Integral, Real
import numpy as np
import sklearn
from scipy.sparse import csr_matrix, issparse
from sklearn.utils.fixes import parse_version
from .._config import config_context, get_config
from ..utils.fixes import _is_arraylike_not_scalar
sklearn_version = parse_version(sklearn.__version__)
if sklearn_version < parse_version("1.3"):
# TODO: remove `if True` when we have clear support for:
# - ignoring `*args` and `**kwargs` in the signature
    # Vendored from `sklearn.utils._param_validation` (scikit-learn < 1.3).
    class InvalidParameterError(ValueError, TypeError):
        """Custom exception to be raised when the parameter of a
        class/method/function does not have a valid type or value.
        """

        # Inherits from ValueError and TypeError to keep backward compatibility.
    def validate_parameter_constraints(parameter_constraints, params, caller_name):
        """Validate types and values of given parameters.

        Parameters
        ----------
        parameter_constraints : dict or {"no_validation"}
            If "no_validation", validation is skipped for this parameter.

            If a dict, it must be a dictionary `param_name: list of constraints`.
            A parameter is valid if it satisfies one of the constraints from the list.
            Constraints can be:
            - an Interval object, representing a continuous or discrete range of numbers
            - the string "array-like"
            - the string "sparse matrix"
            - the string "random_state"
            - callable
            - None, meaning that None is a valid value for the parameter
            - any type, meaning that any instance of this type is valid
            - an Options object, representing a set of elements of a given type
            - a StrOptions object, representing a set of strings
            - the string "boolean"
            - the string "verbose"
            - the string "cv_object"
            - a MissingValues object representing markers for missing values
            - a HasMethods object, representing method(s) an object must have
            - a Hidden object, representing a constraint not meant to be exposed to the
              user

        params : dict
            A dictionary `param_name: param_value`. The parameters to validate
            against the constraints.

        caller_name : str
            The name of the estimator or function or method that called this function.
        """
        for param_name, param_val in params.items():
            # We allow parameters to not have a constraint so that third party
            # estimators can inherit from sklearn estimators without having to
            # necessarily use the validation tools.
            if param_name not in parameter_constraints:
                continue

            constraints = parameter_constraints[param_name]

            if constraints == "no_validation":
                continue

            constraints = [make_constraint(constraint) for constraint in constraints]

            for constraint in constraints:
                if constraint.is_satisfied_by(param_val):
                    # this constraint is satisfied, no need to check further.
                    break
            else:
                # for/else: this branch runs only when no `break` occurred,
                # i.e. no constraint is satisfied; raise with an informative
                # message.
                # Ignore constraints that we don't want to expose in the error
                # message, i.e. options that are for internal purpose or not
                # officially supported.
                constraints = [
                    constraint for constraint in constraints if not constraint.hidden
                ]

                if len(constraints) == 1:
                    constraints_str = f"{constraints[0]}"
                else:
                    constraints_str = (
                        f"{', '.join([str(c) for c in constraints[:-1]])} or"
                        f" {constraints[-1]}"
                    )

                raise InvalidParameterError(
                    f"The {param_name!r} parameter of {caller_name} must be"
                    f" {constraints_str}. Got {param_val!r} instead."
                )
def make_constraint(constraint):
    """Convert the constraint into the appropriate Constraint object.
    Parameters
    ----------
    constraint : object
        The constraint to convert.
    Returns
    -------
    constraint : instance of _Constraint
        The converted constraint.
    """
    if isinstance(constraint, str):
        # String shorthands for the common constraint families.
        shorthand_constraints = {
            "array-like": _ArrayLikes,
            "sparse matrix": _SparseMatrices,
            "random_state": _RandomStates,
            "boolean": _Booleans,
            "verbose": _VerboseHelper,
            "cv_object": _CVObjects,
        }
        if constraint in shorthand_constraints:
            return shorthand_constraints[constraint]()
    if constraint is callable:
        # The builtin `callable` itself is used as a marker for "any callable".
        return _Callables()
    if constraint is None:
        return _NoneConstraint()
    if isinstance(constraint, type):
        return _InstancesOf(constraint)
    if isinstance(
        constraint, (Interval, StrOptions, Options, HasMethods, MissingValues)
    ):
        # Already a constraint object; pass it through unchanged.
        return constraint
    if isinstance(constraint, Hidden):
        # Unwrap, convert the inner constraint, and flag it as hidden so it
        # is omitted from user-facing error messages.
        converted = make_constraint(constraint.constraint)
        converted.hidden = True
        return converted
    raise ValueError(f"Unknown constraint type: {constraint}")
def validate_params(parameter_constraints, *, prefer_skip_nested_validation):
    """Decorator to validate types and values of functions and methods.
    Parameters
    ----------
    parameter_constraints : dict
        A dictionary `param_name: list of constraints`. See the docstring
        of `validate_parameter_constraints` for a description of the
        accepted constraints.
        Note that the *args and **kwargs parameters are not validated and
        must not be present in the parameter_constraints dictionary.
    prefer_skip_nested_validation : bool
        If True, the validation of parameters of inner estimators or functions
        called by the decorated function will be skipped.
        This is useful to avoid validating many times the parameters passed by the
        user from the public facing API. It's also useful to avoid validating
        parameters that we pass internally to inner functions that are guaranteed to
        be valid by the test suite.
        It should be set to True for most functions, except for those that receive
        non-validated objects as parameters or that are just wrappers around classes
        because they only perform a partial validation.
    Returns
    -------
    decorated_function : function or method
        The decorated function.
    """
    def decorator(func):
        # The dict of parameter constraints is set as an attribute of the function
        # to make it possible to dynamically introspect the constraints for
        # automatic testing.
        setattr(func, "_skl_parameter_constraints", parameter_constraints)
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Honor the global switch that disables all parameter validation.
            global_skip_validation = get_config()["skip_parameter_validation"]
            if global_skip_validation:
                return func(*args, **kwargs)
            func_sig = signature(func)
            # Map *args/**kwargs to the function signature
            params = func_sig.bind(*args, **kwargs)
            params.apply_defaults()
            # ignore self/cls and positional/keyword markers
            to_ignore = [
                p.name
                for p in func_sig.parameters.values()
                if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
            ]
            to_ignore += ["self", "cls"]
            params = {
                k: v for k, v in params.arguments.items() if k not in to_ignore
            }
            # Raises InvalidParameterError on the first unsatisfied constraint.
            validate_parameter_constraints(
                parameter_constraints, params, caller_name=func.__qualname__
            )
            try:
                # Optionally silence nested validation while the function runs.
                with config_context(
                    skip_parameter_validation=(
                        prefer_skip_nested_validation or global_skip_validation
                    )
                ):
                    return func(*args, **kwargs)
            except InvalidParameterError as e:
                # When the function is just a wrapper around an estimator,
                # we allow the function to delegate validation to the
                # estimator, but we replace the name of the estimator by
                # the name of the function in the error message to avoid
                # confusion.
                msg = re.sub(
                    r"parameter of \w+ must be",
                    f"parameter of {func.__qualname__} must be",
                    str(e),
                )
                raise InvalidParameterError(msg) from e
        return wrapper
    return decorator
class RealNotInt(Real):
    """A type that represents reals that are not instances of int.
    Behaves like float, but also works with values extracted from numpy arrays.
    isinstance(1, RealNotInt) -> False
    isinstance(1.0, RealNotInt) -> True
    """
RealNotInt.register(float)
def _type_name(t):
"""Convert type into human readable string."""
module = t.__module__
qualname = t.__qualname__
if module == "builtins":
return qualname
elif t == Real:
return "float"
elif t == Integral:
return "int"
return f"{module}.{qualname}"
class _Constraint(ABC):
    """Base class for the constraint objects."""
    def __init__(self):
        # When True, this constraint is omitted from user-facing error
        # messages (see validate_parameter_constraints / Hidden).
        self.hidden = False
    @abstractmethod
    def is_satisfied_by(self, val):
        """Whether or not a value satisfies the constraint.
        Parameters
        ----------
        val : object
            The value to check.
        Returns
        -------
        is_satisfied : bool
            Whether or not the constraint is satisfied by this value.
        """
    @abstractmethod
    def __str__(self):
        """A human readable representational string of the constraint."""
class _InstancesOf(_Constraint):
    """Constraint satisfied by instances of a given type.

    Parameters
    ----------
    type : type
        The valid type.
    """

    def __init__(self, type):
        super().__init__()
        self.type = type

    def is_satisfied_by(self, val):
        # Plain isinstance check against the stored type.
        return isinstance(val, self.type)

    def __str__(self):
        return f"an instance of {_type_name(self.type)!r}"
class _NoneConstraint(_Constraint):
    """Constraint matching only the ``None`` singleton."""

    def is_satisfied_by(self, val):
        # Identity check: nothing but the None singleton qualifies.
        return val is None

    def __str__(self):
        return "None"
class _NanConstraint(_Constraint):
    """Constraint representing the indicator `np.nan`."""

    def is_satisfied_by(self, val):
        # Any real NaN qualifies: float("nan"), np.nan, np.float64 NaN, ...
        return isinstance(val, Real) and math.isnan(val)

    def __str__(self):
        return "numpy.nan"
class _PandasNAConstraint(_Constraint):
    """Constraint representing the indicator `pd.NA`."""

    def is_satisfied_by(self, val):
        try:
            import pandas as pd
        except ImportError:
            # Without pandas installed, no value can be pd.NA.
            return False
        return isinstance(val, type(pd.NA)) and pd.isna(val)

    def __str__(self):
        return "pandas.NA"
class Options(_Constraint):
    """Constraint representing a finite set of instances of a given type.

    Parameters
    ----------
    type : type
    options : set
        The set of valid scalars.
    deprecated : set or None, default=None
        A subset of the `options` to mark as deprecated in the string
        representation of the constraint.
    """

    def __init__(self, type, options, *, deprecated=None):
        super().__init__()
        self.type = type
        self.options = options
        self.deprecated = deprecated or set()
        # Every deprecated entry must itself be one of the valid options.
        if self.deprecated - self.options:
            raise ValueError("The deprecated options must be a subset of the options.")

    def is_satisfied_by(self, val):
        return isinstance(val, self.type) and val in self.options

    def _mark_if_deprecated(self, option):
        """Add a deprecated mark to an option if needed."""
        suffix = " (deprecated)" if option in self.deprecated else ""
        return f"{option!r}{suffix}"

    def __str__(self):
        options_str = ", ".join(self._mark_if_deprecated(o) for o in self.options)
        return f"a {_type_name(self.type)} among {{{options_str}}}"
class StrOptions(Options):
    """Constraint restricted to a finite set of strings.

    Parameters
    ----------
    options : set of str
        The set of valid strings.
    deprecated : set of str or None, default=None
        A subset of the `options` to mark as deprecated in the string
        representation of the constraint.
    """

    def __init__(self, options, *, deprecated=None):
        # Delegate to Options with the value type pinned to str.
        super().__init__(type=str, options=options, deprecated=deprecated)
class Interval(_Constraint):
    """Constraint representing a typed interval.
    Parameters
    ----------
    type : {numbers.Integral, numbers.Real, RealNotInt}
        The set of numbers in which to set the interval.
        If RealNotInt, only reals that don't have the integer type
        are allowed. For example 1.0 is allowed but 1 is not.
    left : float or int or None
        The left bound of the interval. None means left bound is -∞.
    right : float or int or None
        The right bound of the interval. None means right bound is +∞.
    closed : {"left", "right", "both", "neither"}
        Whether the interval is open or closed. Possible choices are:
        - `"left"`: the interval is closed on the left and open on the right.
          It is equivalent to the interval `[ left, right )`.
        - `"right"`: the interval is closed on the right and open on the left.
          It is equivalent to the interval `( left, right ]`.
        - `"both"`: the interval is closed.
          It is equivalent to the interval `[ left, right ]`.
        - `"neither"`: the interval is open.
          It is equivalent to the interval `( left, right )`.
    Notes
    -----
    Setting a bound to `None` and setting the interval closed is valid. For
    instance, strictly speaking, `Interval(Real, 0, None, closed="both")`
    corresponds to `[0, +∞) U {+∞}`.
    """
    def __init__(self, type, left, right, *, closed):
        super().__init__()
        self.type = type
        self.left = left
        self.right = right
        self.closed = closed
        # Validate the constraint definition itself, eagerly at construction.
        self._check_params()
    def _check_params(self):
        if self.type not in (Integral, Real, RealNotInt):
            raise ValueError(
                "type must be either numbers.Integral, numbers.Real or RealNotInt."
                f" Got {self.type} instead."
            )
        if self.closed not in ("left", "right", "both", "neither"):
            raise ValueError(
                "closed must be either 'left', 'right', 'both' or 'neither'. "
                f"Got {self.closed} instead."
            )
        if self.type is Integral:
            suffix = "for an interval over the integers."
            if self.left is not None and not isinstance(self.left, Integral):
                raise TypeError(f"Expecting left to be an int {suffix}")
            if self.right is not None and not isinstance(self.right, Integral):
                raise TypeError(f"Expecting right to be an int {suffix}")
            # An integer interval can't include an infinite endpoint, so a
            # closed side must have an explicit finite bound.
            if self.left is None and self.closed in ("left", "both"):
                raise ValueError(
                    f"left can't be None when closed == {self.closed} {suffix}"
                )
            if self.right is None and self.closed in ("right", "both"):
                raise ValueError(
                    f"right can't be None when closed == {self.closed} {suffix}"
                )
        else:
            if self.left is not None and not isinstance(self.left, Real):
                raise TypeError("Expecting left to be a real number.")
            if self.right is not None and not isinstance(self.right, Real):
                raise TypeError("Expecting right to be a real number.")
        if (
            self.right is not None
            and self.left is not None
            and self.right <= self.left
        ):
            raise ValueError(
                f"right can't be less than left. Got left={self.left} and "
                f"right={self.right}"
            )
    def __contains__(self, val):
        # NaN compares false with everything; reject it explicitly.
        if np.isnan(val):
            return False
        # Pick the comparison that detects "val lies OUTSIDE the bound":
        # strict (<, >) when the bound is included, non-strict otherwise.
        left_cmp = operator.lt if self.closed in ("left", "both") else operator.le
        right_cmp = operator.gt if self.closed in ("right", "both") else operator.ge
        # None bounds mean an infinite endpoint.
        left = -np.inf if self.left is None else self.left
        right = np.inf if self.right is None else self.right
        if left_cmp(val, left):
            return False
        if right_cmp(val, right):
            return False
        return True
    def is_satisfied_by(self, val):
        # Type check first, then the range check via __contains__.
        if not isinstance(val, self.type):
            return False
        return val in self
    def __str__(self):
        type_str = "an int" if self.type is Integral else "a float"
        left_bracket = "[" if self.closed in ("left", "both") else "("
        left_bound = "-inf" if self.left is None else self.left
        right_bound = "inf" if self.right is None else self.right
        right_bracket = "]" if self.closed in ("right", "both") else ")"
        # better repr if the bounds were given as integers
        if not self.type == Integral and isinstance(self.left, Real):
            left_bound = float(left_bound)
        if not self.type == Integral and isinstance(self.right, Real):
            right_bound = float(right_bound)
        return (
            f"{type_str} in the range "
            f"{left_bracket}{left_bound}, {right_bound}{right_bracket}"
        )
class _ArrayLikes(_Constraint):
    """Constraint satisfied by array-like values."""

    def is_satisfied_by(self, val):
        # Delegates to the sklearn helper; scalars are explicitly excluded.
        return _is_arraylike_not_scalar(val)

    def __str__(self):
        return "an array-like"
class _SparseMatrices(_Constraint):
    """Constraint satisfied by scipy sparse matrices."""

    def is_satisfied_by(self, val):
        return issparse(val)

    def __str__(self):
        return "a sparse matrix"
class _Callables(_Constraint):
    """Constraint satisfied by any callable object."""

    def is_satisfied_by(self, val):
        return callable(val)

    def __str__(self):
        return "a callable"
class _RandomStates(_Constraint):
    """Constraint representing random states.
    Convenience class for
    [Interval(Integral, 0, 2**32 - 1, closed="both"), np.random.RandomState, None]
    """

    def __init__(self):
        super().__init__()
        # Valid values: a 32-bit unsigned seed, a RandomState instance, or None.
        self._constraints = [
            Interval(Integral, 0, 2**32 - 1, closed="both"),
            _InstancesOf(np.random.RandomState),
            _NoneConstraint(),
        ]

    def is_satisfied_by(self, val):
        return any(sub.is_satisfied_by(val) for sub in self._constraints)

    def __str__(self):
        *head, tail = (str(c) for c in self._constraints)
        return f"{', '.join(head)} or {tail}"
class _Booleans(_Constraint):
    """Constraint representing boolean likes.
    Convenience class for
    [bool, np.bool_, Integral (deprecated)]
    """

    def __init__(self):
        super().__init__()
        # Plain ints are still accepted but trigger a deprecation warning.
        self._constraints = [
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
            _InstancesOf(Integral),
        ]

    def is_satisfied_by(self, val):
        # TODO(1.4) remove support for Integral.
        if isinstance(val, Integral) and not isinstance(val, bool):
            warnings.warn(
                (
                    # Fixed: the original message had a double space
                    # ("deprecated in  version 1.2") from two adjacent
                    # literals each carrying a space.
                    "Passing an int for a boolean parameter is deprecated in "
                    "version 1.2 and won't be supported anymore in version 1.4."
                ),
                FutureWarning,
            )
        return any(c.is_satisfied_by(val) for c in self._constraints)

    def __str__(self):
        return (
            f"{', '.join([str(c) for c in self._constraints[:-1]])} or"
            f" {self._constraints[-1]}"
        )
class _VerboseHelper(_Constraint):
    """Helper constraint for the verbose parameter.
    Convenience class for
    [Interval(Integral, 0, None, closed="left"), bool, numpy.bool_]
    """

    def __init__(self):
        super().__init__()
        # Accepted: any non-negative integer, or a (numpy) boolean.
        self._constraints = [
            Interval(Integral, 0, None, closed="left"),
            _InstancesOf(bool),
            _InstancesOf(np.bool_),
        ]

    def is_satisfied_by(self, val):
        return any(sub.is_satisfied_by(val) for sub in self._constraints)

    def __str__(self):
        *head, tail = (str(c) for c in self._constraints)
        return f"{', '.join(head)} or {tail}"
class MissingValues(_Constraint):
    """Helper constraint for the `missing_values` parameters.
    Convenience for
    [
        Integral,
        Interval(Real, None, None, closed="both"),
        str,  # when numeric_only is False
        None,  # when numeric_only is False
        _NanConstraint(),
        _PandasNAConstraint(),
    ]
    Parameters
    ----------
    numeric_only : bool, default=False
        Whether to consider only numeric missing value markers.
    """

    def __init__(self, numeric_only=False):
        super().__init__()
        self.numeric_only = numeric_only
        # Numeric markers are always accepted. The unbounded Real interval
        # rejects NaN (Interval.__contains__ does), so NaN gets its own
        # dedicated constraint below.
        self._constraints = [
            _InstancesOf(Integral),
            Interval(Real, None, None, closed="both"),
            _NanConstraint(),
            _PandasNAConstraint(),
        ]
        if not self.numeric_only:
            # String and None markers are only valid in non-numeric mode.
            self._constraints += [_InstancesOf(str), _NoneConstraint()]

    def is_satisfied_by(self, val):
        return any(sub.is_satisfied_by(val) for sub in self._constraints)

    def __str__(self):
        *head, tail = (str(c) for c in self._constraints)
        return f"{', '.join(head)} or {tail}"
class HasMethods(_Constraint):
    """Constraint representing objects that expose specific methods.
    It is useful for parameters following a protocol and where we don't
    want to impose an affiliation to a specific module or class.
    Parameters
    ----------
    methods : str or list of str
        The method(s) that the object is expected to expose.
    """

    @validate_params(
        {"methods": [str, list]},
        prefer_skip_nested_validation=True,
    )
    def __init__(self, methods):
        super().__init__()
        # Normalize a single method name into a one-element list.
        self.methods = [methods] if isinstance(methods, str) else methods

    def is_satisfied_by(self, val):
        # Every required attribute must exist and be callable.
        return all(callable(getattr(val, name, None)) for name in self.methods)

    def __str__(self):
        if len(self.methods) == 1:
            methods = f"{self.methods[0]!r}"
        else:
            methods = (
                f"{', '.join([repr(m) for m in self.methods[:-1]])} and"
                f" {self.methods[-1]!r}"
            )
        return f"an object implementing {methods}"
class _IterablesNotString(_Constraint):
    """Constraint satisfied by iterables, except strings."""

    def is_satisfied_by(self, val):
        # Strings are iterable but deliberately rejected here.
        return isinstance(val, Iterable) and not isinstance(val, str)

    def __str__(self):
        return "an iterable"
class _CVObjects(_Constraint):
    """Constraint representing cv objects.
    Convenient class for
    [
        Interval(Integral, 2, None, closed="left"),
        HasMethods(["split", "get_n_splits"]),
        _IterablesNotString(),
        None,
    ]
    """

    def __init__(self):
        super().__init__()
        # Valid values: a fold count >= 2, a splitter object, a non-string
        # iterable of splits, or None.
        self._constraints = [
            Interval(Integral, 2, None, closed="left"),
            HasMethods(["split", "get_n_splits"]),
            _IterablesNotString(),
            _NoneConstraint(),
        ]

    def is_satisfied_by(self, val):
        return any(sub.is_satisfied_by(val) for sub in self._constraints)

    def __str__(self):
        *head, tail = (str(c) for c in self._constraints)
        return f"{', '.join(head)} or {tail}"
class Hidden:
    """Class encapsulating a constraint not meant to be exposed to the user.
    Parameters
    ----------
    constraint : str or _Constraint instance
        The constraint to be used internally.
    """

    def __init__(self, constraint):
        # Simply record the wrapped constraint; make_constraint unwraps it
        # and sets the resulting constraint's `hidden` flag.
        self.constraint = constraint
def generate_invalid_param_val(constraint):
    """Return a value that does not satisfy the constraint.
    Raises a NotImplementedError if there exists no invalid value for this
    constraint.
    This is only useful for testing purpose.
    Parameters
    ----------
    constraint : _Constraint instance
        The constraint to generate a value for.
    Returns
    -------
    val : object
        A value that does not satisfy the constraint.
    """
    # NOTE: branch order matters, e.g. StrOptions must be tested before any
    # broader Options handling and Interval is dispatched on its value type.
    if isinstance(constraint, StrOptions):
        # A string that lists the valid options can never be one of them.
        return f"not {' or '.join(constraint.options)}"
    if isinstance(constraint, MissingValues):
        # An array is never a valid missing-value marker.
        return np.array([1, 2, 3])
    if isinstance(constraint, _VerboseHelper):
        # verbose only accepts non-negative ints or booleans.
        return -1
    if isinstance(constraint, HasMethods):
        # A fresh empty class exposes none of the required methods.
        return type("HasNotMethods", (), {})()
    if isinstance(constraint, _IterablesNotString):
        return "a string"
    if isinstance(constraint, _CVObjects):
        return "not a cv object"
    if isinstance(constraint, Interval) and constraint.type is Integral:
        # Step one past a finite bound to land outside the interval.
        if constraint.left is not None:
            return constraint.left - 1
        if constraint.right is not None:
            return constraint.right + 1
        # There's no integer outside (-inf, +inf)
        raise NotImplementedError
    if isinstance(constraint, Interval) and constraint.type in (Real, RealNotInt):
        if constraint.left is not None:
            return constraint.left - 1e-6
        if constraint.right is not None:
            return constraint.right + 1e-6
        # bounds are -inf, +inf
        if constraint.closed in ("right", "neither"):
            return -np.inf
        if constraint.closed in ("left", "neither"):
            return np.inf
        # interval is [-inf, +inf]
        return np.nan
    raise NotImplementedError
def generate_valid_param(constraint):
    """Return a value that does satisfy a constraint.
    This is only useful for testing purpose.
    Parameters
    ----------
    constraint : Constraint instance
        The constraint to generate a value for.
    Returns
    -------
    val : object
        A value that does satisfy the constraint.
    """
    if isinstance(constraint, _ArrayLikes):
        return np.array([1, 2, 3])
    if isinstance(constraint, _SparseMatrices):
        return csr_matrix([[0, 1], [1, 0]])
    if isinstance(constraint, _RandomStates):
        return np.random.RandomState(42)
    if isinstance(constraint, _Callables):
        return lambda x: x
    if isinstance(constraint, _NoneConstraint):
        return None
    if isinstance(constraint, _InstancesOf):
        if constraint.type is np.ndarray:
            # special case for ndarray since it can't be instantiated without
            # arguments
            return np.array([1, 2, 3])
        if constraint.type in (Integral, Real):
            # special case for Integral and Real since they are abstract classes
            return 1
        return constraint.type()
    if isinstance(constraint, _Booleans):
        return True
    if isinstance(constraint, _VerboseHelper):
        return 1
    if isinstance(constraint, MissingValues) and constraint.numeric_only:
        return np.nan
    if isinstance(constraint, MissingValues) and not constraint.numeric_only:
        return "missing"
    if isinstance(constraint, HasMethods):
        # Build an ad-hoc class exposing exactly the required methods.
        return type(
            "ValidHasMethods",
            (),
            {m: lambda self: None for m in constraint.methods},
        )()
    if isinstance(constraint, _IterablesNotString):
        return [1, 2, 3]
    if isinstance(constraint, _CVObjects):
        return 5
    if isinstance(constraint, Options):  # includes StrOptions
        # Return an arbitrary member of the (unordered) option set.
        for option in constraint.options:
            return option
    if isinstance(constraint, Interval):
        interval = constraint
        if interval.left is None and interval.right is None:
            return 0
        elif interval.left is None:
            # Step inward from the finite bound so open endpoints are avoided.
            return interval.right - 1
        elif interval.right is None:
            return interval.left + 1
        else:
            if interval.type is Real:
                return (interval.left + interval.right) / 2
            else:
                return interval.left + 1
    raise ValueError(f"Unknown constraint type: {constraint}")
else:
from sklearn.utils._param_validation import generate_invalid_param_val # noqa
from sklearn.utils._param_validation import generate_valid_param # noqa
from sklearn.utils._param_validation import validate_parameter_constraints # noqa
from sklearn.utils._param_validation import (
HasMethods,
Hidden,
Interval,
InvalidParameterError,
MissingValues,
Options,
RealNotInt,
StrOptions,
_ArrayLikes,
_Booleans,
_Callables,
_CVObjects,
_InstancesOf,
_IterablesNotString,
_NoneConstraint,
_PandasNAConstraint,
_RandomStates,
_SparseMatrices,
_VerboseHelper,
make_constraint,
validate_params,
)
| 32,856 | 33.880042 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_param_validation.py | """This is a copy of sklearn/utils/tests/test_param_validation.py. It can be
removed when we support scikit-learn >= 1.2.
"""
from numbers import Integral, Real
import numpy as np
import pytest
from scipy.sparse import csr_matrix
from sklearn.base import BaseEstimator
from sklearn.model_selection import LeaveOneOut
from sklearn.utils import deprecated
from imblearn._config import config_context, get_config
from imblearn.base import _ParamsValidationMixin
from imblearn.utils._param_validation import (
HasMethods,
Hidden,
Interval,
InvalidParameterError,
MissingValues,
Options,
RealNotInt,
StrOptions,
_ArrayLikes,
_Booleans,
_Callables,
_CVObjects,
_InstancesOf,
_IterablesNotString,
_NoneConstraint,
_PandasNAConstraint,
_RandomStates,
_SparseMatrices,
_VerboseHelper,
generate_invalid_param_val,
generate_valid_param,
make_constraint,
validate_params,
)
from imblearn.utils.fixes import _fit_context
# Some helpers for the tests
# Exercises positional, keyword-only, *args and **kwargs parameters at once.
@validate_params(
    {"a": [Real], "b": [Real], "c": [Real], "d": [Real]},
    prefer_skip_nested_validation=True,
)
def _func(a, b=0, *args, c, d=0, **kwargs):
    """A function to test the validation of functions."""
class _Class:
    """A class to test the _InstancesOf constraint and the validation of methods."""
    @validate_params({"a": [Real]}, prefer_skip_nested_validation=True)
    def _method(self, a):
        """A validated method"""
    # Checks that validation composes with an outer deprecation decorator.
    @deprecated()
    @validate_params({"a": [Real]}, prefer_skip_nested_validation=True)
    def _deprecated_method(self, a):
        """A deprecated validated method"""
class _Estimator(_ParamsValidationMixin, BaseEstimator):
    """An estimator to test the validation of estimator parameters."""
    # Class-level constraints consumed by the mixin's validation machinery.
    _parameter_constraints: dict = {"a": [Real]}
    def __init__(self, a):
        self.a = a
    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X=None, y=None):
        pass
@pytest.mark.parametrize("interval_type", [Integral, Real])
def test_interval_range(interval_type):
    """Check the range of values depending on closed."""
    # Each `closed` mode includes exactly the endpoints it names.
    interval = Interval(interval_type, -2, 2, closed="left")
    assert -2 in interval and 2 not in interval
    interval = Interval(interval_type, -2, 2, closed="right")
    assert -2 not in interval and 2 in interval
    interval = Interval(interval_type, -2, 2, closed="both")
    assert -2 in interval and 2 in interval
    interval = Interval(interval_type, -2, 2, closed="neither")
    assert -2 not in interval and 2 not in interval
def test_interval_inf_in_bounds():
    """Check that inf is included iff a bound is closed and set to None.
    Only valid for real intervals.
    """
    # Closed on the right with right=None: +inf belongs to the interval.
    assert np.inf in Interval(Real, 0, None, closed="right")
    # Closed on the left with left=None: -inf belongs to the interval.
    assert -np.inf in Interval(Real, None, 0, closed="left")
    # Open on both sides: neither infinity belongs.
    open_interval = Interval(Real, None, None, closed="neither")
    assert np.inf not in open_interval
    assert -np.inf not in open_interval
@pytest.mark.parametrize(
    "interval",
    [Interval(Real, 0, 1, closed="left"), Interval(Real, None, None, closed="both")],
)
def test_nan_not_in_interval(interval):
    """Check that np.nan is not in any interval."""
    # NaN compares false with everything, so membership is rejected explicitly.
    assert np.nan not in interval
# Each case: invalid Interval kwargs, the expected exception, and a message regex.
@pytest.mark.parametrize(
    "params, error, match",
    [
        (
            {"type": Integral, "left": 1.0, "right": 2, "closed": "both"},
            TypeError,
            r"Expecting left to be an int for an interval over the integers",
        ),
        (
            {"type": Integral, "left": 1, "right": 2.0, "closed": "neither"},
            TypeError,
            "Expecting right to be an int for an interval over the integers",
        ),
        (
            {"type": Integral, "left": None, "right": 0, "closed": "left"},
            ValueError,
            r"left can't be None when closed == left",
        ),
        (
            {"type": Integral, "left": 0, "right": None, "closed": "right"},
            ValueError,
            r"right can't be None when closed == right",
        ),
        (
            {"type": Integral, "left": 1, "right": -1, "closed": "both"},
            ValueError,
            r"right can't be less than left",
        ),
    ],
)
def test_interval_errors(params, error, match):
    """Check that informative errors are raised for invalid combination of parameters"""
    with pytest.raises(error, match=match):
        Interval(**params)
def test_stroptions():
    """Sanity check for the StrOptions constraint"""
    constraint = StrOptions({"a", "b", "c"}, deprecated={"c"})
    # Valid options are accepted, including deprecated ones.
    assert constraint.is_satisfied_by("a")
    assert constraint.is_satisfied_by("c")
    # Unknown strings are rejected.
    assert not constraint.is_satisfied_by("d")
    # Deprecated options are flagged in the string representation.
    assert "'c' (deprecated)" in str(constraint)
def test_options():
    """Sanity check for the Options constraint"""
    constraint = Options(Real, {-0.5, 0.5, np.inf}, deprecated={-0.5})
    # Members of the option set are accepted, including deprecated ones.
    assert constraint.is_satisfied_by(-0.5)
    assert constraint.is_satisfied_by(np.inf)
    # Other reals are rejected.
    assert not constraint.is_satisfied_by(1.23)
    # Deprecated options are flagged in the string representation.
    assert "-0.5 (deprecated)" in str(constraint)
# Numeric ABCs are reported as their familiar concrete names; other types
# keep their module prefix unless they are builtins.
@pytest.mark.parametrize(
    "type, expected_type_name",
    [
        (int, "int"),
        (Integral, "int"),
        (Real, "float"),
        (np.ndarray, "numpy.ndarray"),
    ],
)
def test_instances_of_type_human_readable(type, expected_type_name):
    """Check the string representation of the _InstancesOf constraint."""
    constraint = _InstancesOf(type)
    assert str(constraint) == f"an instance of '{expected_type_name}'"
def test_hasmethods():
    """Check the HasMethods constraint."""
    constraint = HasMethods(["a", "b"])

    class _WithBothMethods:
        def a(self):
            pass  # pragma: no cover

        def b(self):
            pass  # pragma: no cover

    class _MissingMethodB:
        def a(self):
            pass  # pragma: no cover

    # Satisfied only when every listed method is present and callable.
    assert constraint.is_satisfied_by(_WithBothMethods())
    assert not constraint.is_satisfied_by(_MissingMethodB())
    assert str(constraint) == "an object implementing 'a' and 'b'"
# Only constraints for which an invalid value exists are listed here; the
# "all values valid" constraints are covered in a separate test below.
@pytest.mark.parametrize(
    "constraint",
    [
        Interval(Real, None, 0, closed="left"),
        Interval(Real, 0, None, closed="left"),
        Interval(Real, None, None, closed="neither"),
        StrOptions({"a", "b", "c"}),
        MissingValues(),
        MissingValues(numeric_only=True),
        _VerboseHelper(),
        HasMethods("fit"),
        _IterablesNotString(),
        _CVObjects(),
    ],
)
def test_generate_invalid_param_val(constraint):
    """Check that the value generated does not satisfy the constraint"""
    bad_value = generate_invalid_param_val(constraint)
    assert not constraint.is_satisfied_by(bad_value)
# Pairs of an integer interval and a real interval covering the possible
# overlap configurations of their bounds.
@pytest.mark.parametrize(
    "integer_interval, real_interval",
    [
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, -5, 5, closed="both"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, -5, 5, closed="neither"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, 4, 5, closed="both"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, 5, None, closed="left"),
        ),
        (
            Interval(Integral, None, 3, closed="right"),
            Interval(RealNotInt, 4, None, closed="neither"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, -5, 5, closed="both"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, -5, 5, closed="neither"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, 1, 2, closed="both"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, None, -5, closed="left"),
        ),
        (
            Interval(Integral, 3, None, closed="left"),
            Interval(RealNotInt, None, -4, closed="neither"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, None, 1, closed="right"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, 1, None, closed="left"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, -10, -4, closed="neither"),
        ),
        (
            Interval(Integral, -5, 5, closed="both"),
            Interval(RealNotInt, -10, -4, closed="right"),
        ),
        (
            Interval(Integral, -5, 5, closed="neither"),
            Interval(RealNotInt, 6, 10, closed="neither"),
        ),
        (
            Interval(Integral, -5, 5, closed="neither"),
            Interval(RealNotInt, 6, 10, closed="left"),
        ),
        (
            Interval(Integral, 2, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="both"),
        ),
        (
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0, 1, closed="both"),
        ),
    ],
)
def test_generate_invalid_param_val_2_intervals(integer_interval, real_interval):
    """Check that the value generated for an interval constraint does not satisfy any of
    the interval constraints.
    """
    bad_value = generate_invalid_param_val(constraint=real_interval)
    assert not real_interval.is_satisfied_by(bad_value)
    assert not integer_interval.is_satisfied_by(bad_value)
    bad_value = generate_invalid_param_val(constraint=integer_interval)
    assert not real_interval.is_satisfied_by(bad_value)
    assert not integer_interval.is_satisfied_by(bad_value)
# These constraints accept every value of their domain, so no invalid value
# can be generated for them.
@pytest.mark.parametrize(
    "constraint",
    [
        _ArrayLikes(),
        _InstancesOf(list),
        _Callables(),
        _NoneConstraint(),
        _RandomStates(),
        _SparseMatrices(),
        _Booleans(),
        Interval(Integral, None, None, closed="neither"),
    ],
)
def test_generate_invalid_param_val_all_valid(constraint):
    """Check that the function raises NotImplementedError when there's no invalid value
    for the constraint.
    """
    with pytest.raises(NotImplementedError):
        generate_invalid_param_val(constraint)
# One representative instance per constraint family handled by
# generate_valid_param.
@pytest.mark.parametrize(
    "constraint",
    [
        _ArrayLikes(),
        _Callables(),
        _InstancesOf(list),
        _NoneConstraint(),
        _RandomStates(),
        _SparseMatrices(),
        _Booleans(),
        _VerboseHelper(),
        MissingValues(),
        MissingValues(numeric_only=True),
        StrOptions({"a", "b", "c"}),
        Options(Integral, {1, 2, 3}),
        Interval(Integral, None, None, closed="neither"),
        Interval(Integral, 0, 10, closed="neither"),
        Interval(Integral, 0, None, closed="neither"),
        Interval(Integral, None, 0, closed="neither"),
        Interval(Real, 0, 1, closed="neither"),
        Interval(Real, 0, None, closed="both"),
        Interval(Real, None, 0, closed="right"),
        HasMethods("fit"),
        _IterablesNotString(),
        _CVObjects(),
    ],
)
def test_generate_valid_param(constraint):
    """Check that the value generated does satisfy the constraint."""
    value = generate_valid_param(constraint)
    assert constraint.is_satisfied_by(value)
# Pairs of (declaration accepted by make_constraint, value satisfying it),
# covering both shorthand strings and explicit constraint instances.
@pytest.mark.parametrize(
    "constraint_declaration, value",
    [
        (Interval(Real, 0, 1, closed="both"), 0.42),
        (Interval(Integral, 0, None, closed="neither"), 42),
        (StrOptions({"a", "b", "c"}), "b"),
        (Options(type, {np.float32, np.float64}), np.float64),
        (callable, lambda x: x + 1),
        (None, None),
        ("array-like", [[1, 2], [3, 4]]),
        ("array-like", np.array([[1, 2], [3, 4]])),
        ("sparse matrix", csr_matrix([[1, 2], [3, 4]])),
        ("random_state", 0),
        ("random_state", np.random.RandomState(0)),
        ("random_state", None),
        (_Class, _Class()),
        (int, 1),
        (Real, 0.5),
        ("boolean", False),
        ("verbose", 1),
        (MissingValues(), -1),
        (MissingValues(), -1.0),
        (MissingValues(), None),
        (MissingValues(), float("nan")),
        (MissingValues(), np.nan),
        (MissingValues(), "missing"),
        (HasMethods("fit"), _Estimator(a=0)),
        ("cv_object", 5),
    ],
)
def test_is_satisfied_by(constraint_declaration, value):
    """Sanity check for the is_satisfied_by method"""
    constraint = make_constraint(constraint_declaration)
    assert constraint.is_satisfied_by(value)
# Maps each supported declaration form to the constraint class it must yield.
@pytest.mark.parametrize(
    "constraint_declaration, expected_constraint_class",
    [
        (Interval(Real, 0, 1, closed="both"), Interval),
        (StrOptions({"option1", "option2"}), StrOptions),
        (Options(Real, {0.42, 1.23}), Options),
        ("array-like", _ArrayLikes),
        ("sparse matrix", _SparseMatrices),
        ("random_state", _RandomStates),
        (None, _NoneConstraint),
        (callable, _Callables),
        (int, _InstancesOf),
        ("boolean", _Booleans),
        ("verbose", _VerboseHelper),
        (MissingValues(numeric_only=True), MissingValues),
        (HasMethods("fit"), HasMethods),
        ("cv_object", _CVObjects),
    ],
)
def test_make_constraint(constraint_declaration, expected_constraint_class):
    """Check that make_constraint dispatches to the appropriate constraint class"""
    constraint = make_constraint(constraint_declaration)
    assert constraint.__class__ is expected_constraint_class
def test_make_constraint_unknown():
    """Check that an informative error is raised when an unknown constraint is passed"""
    expected_msg = "Unknown constraint"
    with pytest.raises(ValueError, match=expected_msg):
        make_constraint("not a valid constraint")
def test_validate_params():
    """Check that validate_params works no matter how the arguments are passed"""
    # positional argument
    with pytest.raises(
        InvalidParameterError, match="The 'a' parameter of _func must be"
    ):
        _func("wrong", c=1)

    # extra positional arguments collected by *args
    with pytest.raises(
        InvalidParameterError, match="The 'b' parameter of _func must be"
    ):
        _func(*[1, "wrong"], c=1)

    # keyword argument
    with pytest.raises(
        InvalidParameterError, match="The 'c' parameter of _func must be"
    ):
        _func(1, **{"c": "wrong"})

    # extra keyword arguments collected by **kwargs
    with pytest.raises(
        InvalidParameterError, match="The 'd' parameter of _func must be"
    ):
        _func(1, c=1, d="wrong")

    # check in the presence of extra positional and keyword args
    with pytest.raises(
        InvalidParameterError, match="The 'b' parameter of _func must be"
    ):
        _func(0, *["wrong", 2, 3], c=4, **{"e": 5})

    with pytest.raises(
        InvalidParameterError, match="The 'c' parameter of _func must be"
    ):
        _func(0, *[1, 2, 3], c="four", **{"e": 5})
def test_validate_params_missing_params():
    """Check that no error is raised when there are parameters without
    constraints
    """

    @validate_params({"a": [int]}, prefer_skip_nested_validation=True)
    def f(a, b):
        pass

    # "b" has no declared constraint: only "a" is validated, so this passes.
    f(1, 2)
def test_decorate_validated_function():
    """Check that validate_params functions can be decorated"""
    # wrap the already-validated _func in the `deprecated` decorator
    decorated_function = deprecated()(_func)

    with pytest.warns(FutureWarning, match="Function _func is deprecated"):
        decorated_function(1, 2, c=3)

    # outer decorator does not interfere with validation
    with pytest.warns(FutureWarning, match="Function _func is deprecated"):
        with pytest.raises(
            InvalidParameterError, match=r"The 'c' parameter of _func must be"
        ):
            decorated_function(1, 2, c="wrong")
def test_validate_params_method():
    """Check that validate_params works with methods"""
    # the error message reports the qualified name Class._method
    with pytest.raises(
        InvalidParameterError, match="The 'a' parameter of _Class._method must be"
    ):
        _Class()._method("wrong")

    # validated method can be decorated
    with pytest.warns(FutureWarning, match="Function _deprecated_method is deprecated"):
        with pytest.raises(
            InvalidParameterError,
            match="The 'a' parameter of _Class._deprecated_method must be",
        ):
            _Class()._deprecated_method("wrong")
def test_validate_params_estimator():
    """Check that validate_params works with Estimator instances"""
    # instantiating with an invalid value does not raise: validation is
    # deferred from __init__ to fit, per the scikit-learn convention
    estimator = _Estimator("wrong")

    expected_msg = "The 'a' parameter of _Estimator must be"
    with pytest.raises(InvalidParameterError, match=expected_msg):
        estimator.fit()
def test_stroptions_deprecated_subset():
    """Check that the deprecated parameter must be a subset of options."""
    # "d" is declared deprecated but is not one of the options
    expected_msg = "deprecated options must be a subset"
    with pytest.raises(ValueError, match=expected_msg):
        StrOptions({"a", "b", "c"}, deprecated={"a", "d"})
def test_hidden_constraint():
    """Check that internal constraints are not exposed in the error message."""

    @validate_params(
        {"param": [Hidden(list), dict]}, prefer_skip_nested_validation=True
    )
    def f(param):
        pass

    # list and dict are valid params
    f({"a": 1, "b": 2, "c": 3})
    f([1, 2, 3])

    with pytest.raises(
        InvalidParameterError, match="The 'param' parameter"
    ) as exc_info:
        f(param="bad")

    # the list option is not exposed in the error message
    err_msg = str(exc_info.value)
    assert "an instance of 'dict'" in err_msg
    assert "an instance of 'list'" not in err_msg
def test_hidden_stroptions():
    """Check that we can have 2 StrOptions constraints, one being hidden."""

    @validate_params(
        {"param": [StrOptions({"auto"}), Hidden(StrOptions({"warn"}))]},
        prefer_skip_nested_validation=True,
    )
    def f(param):
        pass

    # "auto" and "warn" are valid params
    f("auto")
    f("warn")

    with pytest.raises(
        InvalidParameterError, match="The 'param' parameter"
    ) as exc_info:
        f(param="bad")

    # the "warn" option is not exposed in the error message
    err_msg = str(exc_info.value)
    assert "auto" in err_msg
    assert "warn" not in err_msg
def test_validate_params_set_param_constraints_attribute():
    """Check that the validate_params decorator properly sets the parameter constraints
    as attribute of the decorated function/method.
    """
    # both a plain function and a bound method carry the attribute
    for validated_callable in (_func, _Class()._method):
        assert hasattr(validated_callable, "_skl_parameter_constraints")
def test_boolean_constraint_deprecated_int():
    """Check that validate_params raise a deprecation message but still passes
    validation when using an int for a parameter accepting a boolean.
    """

    @validate_params({"param": ["boolean"]}, prefer_skip_nested_validation=True)
    def f(param):
        pass

    # True/False and np.bool_(True/False) are valid params
    f(True)
    f(np.bool_(False))

    # an int is also valid but deprecated
    with pytest.warns(
        FutureWarning, match="Passing an int for a boolean parameter is deprecated"
    ):
        f(1)
def test_no_validation():
    """Check that validation can be skipped for a parameter."""

    @validate_params(
        {"param1": [int, None], "param2": "no_validation"},
        prefer_skip_nested_validation=True,
    )
    def f(param1=None, param2=None):
        pass

    # param1 is validated
    with pytest.raises(InvalidParameterError, match="The 'param1' parameter"):
        f(param1="wrong")

    # param2 is not validated: any type is valid.
    class SomeType:
        pass

    # both the class object and an instance of it are accepted
    f(param2=SomeType)
    f(param2=SomeType())
def test_pandas_na_constraint_with_pd_na():
    """Add a specific test for checking support for `pandas.NA`."""
    pd = pytest.importorskip("pandas")

    constraint = _PandasNAConstraint()
    # only the pd.NA singleton satisfies the constraint
    assert constraint.is_satisfied_by(pd.NA)
    assert not constraint.is_satisfied_by(np.array([1, 2, 3]))
def test_iterable_not_string():
    """Check that a string does not satisfy the _IterableNotString constraint."""
    constraint = _IterablesNotString()
    # lists and ranges are iterables and satisfy the constraint
    for iterable in ([1, 2, 3], range(10)):
        assert constraint.is_satisfied_by(iterable)
    # a string is iterable but explicitly rejected
    assert not constraint.is_satisfied_by("some string")
def test_cv_objects():
    """Check that the _CVObjects constraint accepts all current ways
    to pass cv objects."""
    constraint = _CVObjects()
    valid_cv_specifications = (
        5,  # number of folds
        LeaveOneOut(),  # CV splitter instance
        [([1, 2], [3, 4]), ([3, 4], [1, 2])],  # explicit (train, test) splits
        None,  # default CV
    )
    for cv in valid_cv_specifications:
        assert constraint.is_satisfied_by(cv)
    assert not constraint.is_satisfied_by("not a CV object")
def test_third_party_estimator():
    """Check that the validation from a scikit-learn estimator inherited by a third
    party estimator does not impose a match between the dict of constraints and the
    parameters of the estimator.
    """

    class ThirdPartyEstimator(_Estimator):
        # declares its own parameter "b" while the inherited constraints
        # only know about "a"
        def __init__(self, b):
            self.b = b
            super().__init__(a=0)

        def fit(self, X=None, y=None):
            super().fit(X, y)

    # does not raise, even though "b" is not in the constraints dict and "a" is not
    # a parameter of the estimator.
    ThirdPartyEstimator(b=0).fit()
def test_interval_real_not_int():
    """Check for the type RealNotInt in the Interval constraint."""
    constraint = Interval(RealNotInt, 0, 1, closed="both")
    # a float in range is accepted, but the equal-valued int is rejected
    is_valid = constraint.is_satisfied_by
    assert is_valid(1.0)
    assert not is_valid(1)
def test_real_not_int():
    """Check for the RealNotInt type."""
    # floats (builtin or numpy) are RealNotInt, ints (builtin or numpy) are not
    for real_value in (1.0, np.float64(1)):
        assert isinstance(real_value, RealNotInt)
    for integer_value in (1, np.int64(1)):
        assert not isinstance(integer_value, RealNotInt)
def test_skip_param_validation():
    """Check that param validation can be skipped using config_context."""

    @validate_params({"a": [int]}, prefer_skip_nested_validation=True)
    def f(a):
        pass

    with pytest.raises(InvalidParameterError, match="The 'a' parameter"):
        f(a="1")

    # does not raise: the global flag disables validation entirely
    with config_context(skip_parameter_validation=True):
        f(a="1")
@pytest.mark.parametrize("prefer_skip_nested_validation", [True, False])
def test_skip_nested_validation(prefer_skip_nested_validation):
    """Check that nested validation can be skipped."""

    @validate_params({"a": [int]}, prefer_skip_nested_validation=True)
    def f(a):
        pass

    @validate_params(
        {"b": [int]},
        prefer_skip_nested_validation=prefer_skip_nested_validation,
    )
    def g(b):
        # calls f with a bad parameter type
        return f(a="invalid_param_value")

    # Validation for g is never skipped.
    with pytest.raises(InvalidParameterError, match="The 'b' parameter"):
        g(b="invalid_param_value")

    # whether the nested call to f is validated depends on the outer flag
    if prefer_skip_nested_validation:
        g(b=1)  # does not raise because inner f is not validated
    else:
        with pytest.raises(InvalidParameterError, match="The 'a' parameter"):
            g(b=1)
# Validation of a nested call is skipped unless both the global config flag
# and the local prefer_skip_nested_validation flag are False.
@pytest.mark.parametrize(
    "skip_parameter_validation, prefer_skip_nested_validation, expected_skipped",
    [
        (True, True, True),
        (True, False, True),
        (False, True, True),
        (False, False, False),
    ],
)
def test_skip_nested_validation_and_config_context(
    skip_parameter_validation, prefer_skip_nested_validation, expected_skipped
):
    """Check interaction between global skip and local skip."""

    @validate_params(
        {"a": [int]}, prefer_skip_nested_validation=prefer_skip_nested_validation
    )
    def g(a):
        # report the effective flag seen by nested code
        return get_config()["skip_parameter_validation"]

    with config_context(skip_parameter_validation=skip_parameter_validation):
        actual_skipped = g(1)

    assert actual_skipped == expected_skipped
| 23,785 | 30.174312 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_deprecation.py | """Test for the deprecation helper"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import pytest
from imblearn.utils.deprecation import deprecate_parameter
class Sampler:
    """Minimal stub with two attributes to exercise ``deprecate_parameter``."""

    def __init__(self):
        # "a" plays the deprecated parameter, "b" its replacement
        self.a = "something"
        self.b = "something"
def test_deprecate_parameter():
    # deprecation without a replacement parameter
    with pytest.warns(FutureWarning, match="is deprecated from"):
        deprecate_parameter(Sampler(), "0.2", "a")
    # deprecation pointing to the replacement parameter
    with pytest.warns(FutureWarning, match="Use 'b' instead."):
        deprecate_parameter(Sampler(), "0.2", "a", "b")
| 554 | 24.227273 | 65 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_show_versions.py | """Test for the show_versions helper. Based on the sklearn tests."""
# Author: Alexander L. Hayes <hayesall@iu.edu>
# License: MIT
from imblearn.utils._show_versions import _get_deps_info, show_versions
def test_get_deps_info():
    deps_info = _get_deps_info()

    # every core dependency must be reported
    expected_packages = (
        "pip",
        "setuptools",
        "imbalanced-learn",
        "scikit-learn",
        "numpy",
        "scipy",
        "Cython",
        "pandas",
        "joblib",
    )
    for package in expected_packages:
        assert package in deps_info
def test_show_versions_default(capsys):
    show_versions()
    out, err = capsys.readouterr()

    # system information and all dependencies must appear on stdout
    expected_fields = (
        "python",
        "executable",
        "machine",
        "pip",
        "setuptools",
        "imbalanced-learn",
        "scikit-learn",
        "numpy",
        "scipy",
        "Cython",
        "pandas",
        "keras",
        "tensorflow",
        "joblib",
    )
    for field in expected_fields:
        assert field in out
def test_show_versions_github(capsys):
    show_versions(github=True)
    out, err = capsys.readouterr()

    # GitHub mode wraps the report in a collapsible markdown block
    assert "<details><summary>System, Dependency Information</summary>" in out
    assert "**System Information**" in out
    assert "**Python Dependencies**" in out
    assert "</details>" in out

    # each field is rendered as a markdown bullet
    expected_bullets = (
        "python",
        "executable",
        "machine",
        "pip",
        "setuptools",
        "imbalanced-learn",
        "scikit-learn",
        "numpy",
        "scipy",
        "Cython",
        "pandas",
        "keras",
        "tensorflow",
        "joblib",
    )
    for field in expected_bullets:
        assert f"* {field}" in out
| 1,818 | 28.819672 | 78 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_estimator_checks.py | import numpy as np
import pytest
from sklearn.base import BaseEstimator
from sklearn.utils.multiclass import check_classification_targets
from imblearn.base import BaseSampler
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.utils import check_target_type as target_check
from imblearn.utils.estimator_checks import (
check_samplers_fit,
check_samplers_nan,
check_samplers_one_label,
check_samplers_preserve_dtype,
check_samplers_sparse,
check_samplers_string,
check_target_type,
)
class BaseBadSampler(BaseEstimator):
    """Sampler without inputs checking."""

    _sampling_type = "bypass"

    def fit(self, X, y):
        # deliberately performs no input validation
        return self

    def fit_resample(self, X, y):
        # only validates the target, never X
        check_classification_targets(y)
        self.fit(X, y)
        return X, y
class SamplerSingleClass(BaseSampler):
    """Sampler that would sample even with a single class."""

    _sampling_type = "bypass"

    def fit_resample(self, X, y):
        # bypasses the base-class checks that would reject a single class
        return self._fit_resample(X, y)

    def _fit_resample(self, X, y):
        return X, y
class NotFittedSampler(BaseBadSampler):
    """Sampler without target checking."""

    def fit(self, X, y):
        # validates inputs but never sets a fitted attribute (no trailing "_")
        X, y = self._validate_data(X, y)
        return self
class NoAcceptingSparseSampler(BaseBadSampler):
    """Sampler which does not accept sparse matrix."""

    def fit(self, X, y):
        # default _validate_data rejects sparse input (accept_sparse=False)
        X, y = self._validate_data(X, y)
        self.sampling_strategy_ = "sampling_strategy_"
        return self
class NotPreservingDtypeSampler(BaseSampler):
    # Faulty sampler: always upcasts to float64/int64 instead of keeping
    # the input dtypes.
    _sampling_type = "bypass"

    _parameter_constraints: dict = {"sampling_strategy": "no_validation"}

    def _fit_resample(self, X, y):
        return X.astype(np.float64), y.astype(np.int64)
class IndicesSampler(BaseOverSampler):
    """Over-sampler selecting row indices; accepts non-finite and non-numeric X."""

    def _check_X_y(self, X, y):
        y, binarize_y = target_check(y, indicate_one_vs_all=True)
        # dtype=None and force_all_finite=False allow string data and NaN,
        # which the string/nan common checks rely on
        X, y = self._validate_data(
            X,
            y,
            reset=True,
            dtype=None,
            force_all_finite=False,
        )
        return X, y, binarize_y

    def _fit_resample(self, X, y):
        # NOTE(review): uses the global numpy RNG without a seed, so the
        # resampling is non-deterministic — acceptable for these checks.
        n_max_count_class = np.bincount(y).max()
        indices = np.random.choice(np.arange(X.shape[0]), size=n_max_count_class * 2)
        return X[indices], y[indices]
def test_check_samplers_string():
    # the common check for string data must pass with a permissive sampler
    sampler = IndicesSampler()
    name = type(sampler).__name__
    check_samplers_string(name, sampler)
def test_check_samplers_nan():
    # the common check for NaN data must pass with a permissive sampler
    sampler = IndicesSampler()
    name = type(sampler).__name__
    check_samplers_nan(name, sampler)
# Maps each faulty sampler class name to the (error type, message fragment)
# expected when the corresponding common check is run against it.
mapping_estimator_error = {
    "BaseBadSampler": (AssertionError, "ValueError not raised by fit"),
    "SamplerSingleClass": (AssertionError, "Sampler can't balance when only"),
    "NotFittedSampler": (AssertionError, "No fitted attribute"),
    "NoAcceptingSparseSampler": (TypeError, "A sparse matrix was passed"),
    "NotPreservingDtypeSampler": (AssertionError, "X dtype is not preserved"),
}
def _test_single_check(Estimator, check):
    # run one common check and verify it fails with the error registered
    # for this deliberately faulty estimator
    estimator = Estimator()
    name = type(estimator).__name__
    expected_err_type, expected_err_msg = mapping_estimator_error[name]
    with pytest.raises(expected_err_type, match=expected_err_msg):
        check(name, estimator)
def test_all_checks():
    # each faulty sampler is paired with the common check it must fail
    faulty_cases = [
        (BaseBadSampler, check_target_type),
        (SamplerSingleClass, check_samplers_one_label),
        (NotFittedSampler, check_samplers_fit),
        (NoAcceptingSparseSampler, check_samplers_sparse),
        (NotPreservingDtypeSampler, check_samplers_preserve_dtype),
    ]
    for Estimator, check in faulty_cases:
        _test_single_check(Estimator, check)
| 3,546 | 27.837398 | 85 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_docstring.py | """Test utilities for docstring."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import pytest
from imblearn.utils import Substitution
from imblearn.utils._docstring import _n_jobs_docstring, _random_state_docstring
func_docstring = """A function.
Parameters
----------
xxx
yyy
"""
def func(param_1, param_2):
    """A function.

    Parameters
    ----------
    {param_1}

    {param_2}
    """
    # NOTE: the placeholders above are filled in by ``Substitution`` in the
    # tests; the docstring is part of the fixture, not ordinary documentation.
    return param_1, param_2
cls_docstring = """A class.
Parameters
----------
xxx
yyy
"""
class cls:
    """A class.

    Parameters
    ----------
    {param_1}

    {param_2}
    """

    # NOTE: the docstring placeholders are filled in by ``Substitution`` in
    # the tests; this class is a fixture, not a real API.
    def __init__(self, param_1, param_2):
        self.param_1 = param_1
        self.param_2 = param_2
# Substitution must work identically on a function and on a class.
@pytest.mark.parametrize(
    "obj, obj_docstring", [(func, func_docstring), (cls, cls_docstring)]
)
def test_docstring_inject(obj, obj_docstring):
    obj_injected_docstring = Substitution(param_1="xxx", param_2="yyy")(obj)
    assert obj_injected_docstring.__doc__ == obj_docstring
def test_docstring_template():
    # the shared docstring templates must mention the parameter they document
    for param_name, template in [
        ("random_state", _random_state_docstring),
        ("n_jobs", _n_jobs_docstring),
    ]:
        assert param_name in template
def test_docstring_with_python_OO():
    """Check that we don't raise a warning if the code is executed with -OO.

    Non-regression test for:
    https://github.com/scikit-learn-contrib/imbalanced-learn/issues/945
    """
    instance = cls(param_1="xxx", param_2="yyy")
    instance.__doc__ = None  # simulate -OO
    # Substitution must be a no-op when __doc__ is stripped
    instance = Substitution(param_1="xxx", param_2="yyy")(instance)
    assert instance.__doc__ is None
| 1,604 | 18.337349 | 80 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_validation.py | """Test for the validation helper"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter, OrderedDict
import numpy as np
import pytest
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors._base import KNeighborsMixin
from sklearn.utils._testing import assert_array_equal
from imblearn.utils import (
check_neighbors_object,
check_sampling_strategy,
check_target_type,
)
from imblearn.utils._validation import (
ArraysTransformer,
_deprecate_positional_args,
_is_neighbors_object,
)
from imblearn.utils.testing import _CustomNearestNeighbors
# Shared imbalanced targets: 3 classes (50/100/25) and 2 classes (25/100).
multiclass_target = np.array([1] * 50 + [2] * 100 + [3] * 25)
binary_target = np.array([1] * 25 + [0] * 100)
def test_check_neighbors_object():
    name = "n_neighbors"
    n_neighbors = 1
    # an int is turned into a KNeighborsMixin estimator
    estimator = check_neighbors_object(name, n_neighbors)
    assert issubclass(type(estimator), KNeighborsMixin)
    assert estimator.n_neighbors == 1
    # the additional_neighbor argument bumps n_neighbors
    estimator = check_neighbors_object(name, n_neighbors, 1)
    assert issubclass(type(estimator), KNeighborsMixin)
    assert estimator.n_neighbors == 2
    # an estimator instance is cloned, keeping its parameters
    estimator = NearestNeighbors(n_neighbors=n_neighbors)
    estimator_cloned = check_neighbors_object(name, estimator)
    assert estimator.n_neighbors == estimator_cloned.n_neighbors
    # duck-typed custom neighbors estimators are accepted as well
    estimator = _CustomNearestNeighbors()
    estimator_cloned = check_neighbors_object(name, estimator)
    assert isinstance(estimator_cloned, _CustomNearestNeighbors)
# Binary and multiclass targets pass through; a one-vs-all (2D indicator)
# target is converted back to a 1D label vector.
@pytest.mark.parametrize(
    "target, output_target",
    [
        (np.array([0, 1, 1]), np.array([0, 1, 1])),
        (np.array([0, 1, 2]), np.array([0, 1, 2])),
        (np.array([[0, 1], [1, 0]]), np.array([1, 0])),
    ],
)
def test_check_target_type(target, output_target):
    converted_target = check_target_type(target.astype(int))
    assert_array_equal(converted_target, output_target.astype(int))
# Same conversions as above, additionally checking the one-vs-all flag
# returned when indicate_one_vs_all=True.
@pytest.mark.parametrize(
    "target, output_target, is_ova",
    [
        (np.array([0, 1, 1]), np.array([0, 1, 1]), False),
        (np.array([0, 1, 2]), np.array([0, 1, 2]), False),
        (np.array([[0, 1], [1, 0]]), np.array([1, 0]), True),
    ],
)
def test_check_target_type_ova(target, output_target, is_ova):
    converted_target, binarize_target = check_target_type(
        target.astype(int), indicate_one_vs_all=True
    )
    assert_array_equal(converted_target, output_target.astype(int))
    assert binarize_target == is_ova
def test_check_sampling_strategy_warning():
    # passing a dict to a cleaning method is not allowed
    expected_msg = "dict for cleaning methods is not supported"
    with pytest.raises(ValueError, match=expected_msg):
        check_sampling_strategy(
            {1: 0, 2: 0, 3: 0}, multiclass_target, "clean-sampling"
        )
# Float sampling strategies are rejected for cleaning methods, and for
# over/under-sampling when the requested ratio would require the opposite
# operation (removing for an over-sampler, adding for an under-sampler).
@pytest.mark.parametrize(
    "ratio, y, type, err_msg",
    [
        (
            0.5,
            binary_target,
            "clean-sampling",
            "'clean-sampling' methods do let the user specify the sampling ratio",  # noqa
        ),
        (
            0.1,
            np.array([0] * 10 + [1] * 20),
            "over-sampling",
            "remove samples from the minority class while trying to generate new",  # noqa
        ),
        (
            0.1,
            np.array([0] * 10 + [1] * 20),
            "under-sampling",
            "generate new sample in the majority class while trying to remove",
        ),
    ],
)
def test_check_sampling_strategy_float_error(ratio, y, type, err_msg):
    with pytest.raises(ValueError, match=err_msg):
        check_sampling_strategy(ratio, y, type)
def test_check_sampling_strategy_error():
    # unknown sampling type
    with pytest.raises(ValueError, match="'sampling_type' should be one of"):
        check_sampling_strategy("auto", np.array([1, 2, 3]), "rnd")

    # single-class target
    with pytest.raises(
        ValueError, match="The target 'y' needs to have more than 1 class."
    ):
        check_sampling_strategy("auto", np.ones((10,)), "over-sampling")

    # unknown string strategy
    with pytest.raises(
        ValueError,
        match="When 'sampling_strategy' is a string, it needs to be one of",
    ):
        check_sampling_strategy("rnd", np.array([1, 2, 3]), "over-sampling")
# "majority" makes no sense for an over-sampler, "minority" none for an
# under-sampler: both must be rejected with an explicit message.
@pytest.mark.parametrize(
    "sampling_strategy, sampling_type, err_msg",
    [
        ("majority", "over-sampling", "over-sampler"),
        ("minority", "under-sampling", "under-sampler"),
    ],
)
def test_check_sampling_strategy_error_wrong_string(
    sampling_strategy, sampling_type, err_msg
):
    expected_msg = f"'{sampling_strategy}' cannot be used with {err_msg}"
    with pytest.raises(ValueError, match=expected_msg):
        check_sampling_strategy(sampling_strategy, np.array([1, 2, 3]), sampling_type)
# Class label 10 does not exist in y: dict keys and list entries must be
# validated against the classes actually present.
@pytest.mark.parametrize(
    "sampling_strategy, sampling_method",
    [
        ({10: 10}, "under-sampling"),
        ({10: 10}, "over-sampling"),
        ([10], "clean-sampling"),
    ],
)
def test_sampling_strategy_class_target_unknown(sampling_strategy, sampling_method):
    target = np.array([1] * 50 + [2] * 100 + [3] * 25)
    with pytest.raises(ValueError, match="are not present in the data."):
        check_sampling_strategy(sampling_strategy, target, sampling_method)
def test_sampling_strategy_dict_error():
    y = np.array([1] * 50 + [2] * 100 + [3] * 25)
    # negative requested counts are invalid
    sampling_strategy = {1: -100, 2: 50, 3: 25}
    with pytest.raises(ValueError, match="in a class cannot be negative."):
        check_sampling_strategy(sampling_strategy, y, "under-sampling")
    # over-sampling cannot shrink a class (45 < 50 original samples)
    sampling_strategy = {1: 45, 2: 100, 3: 70}
    error_regex = (
        "With over-sampling methods, the number of samples in a"
        " class should be greater or equal to the original number"
        " of samples. Originally, there is 50 samples and 45"
        " samples are asked."
    )
    with pytest.raises(ValueError, match=error_regex):
        check_sampling_strategy(sampling_strategy, y, "over-sampling")

    # under-sampling cannot grow a class (70 > 25 original samples)
    error_regex = (
        "With under-sampling methods, the number of samples in a"
        " class should be less or equal to the original number of"
        " samples. Originally, there is 25 samples and 70 samples"
        " are asked."
    )
    with pytest.raises(ValueError, match=error_regex):
        check_sampling_strategy(sampling_strategy, y, "under-sampling")
@pytest.mark.parametrize("sampling_strategy", [-10, 10])
def test_sampling_strategy_float_error_not_in_range(sampling_strategy):
    # a numeric sampling strategy must lie within (0, 1]
    target = np.array([1] * 50 + [2] * 100)
    with pytest.raises(ValueError, match="it should be in the range"):
        check_sampling_strategy(sampling_strategy, target, "under-sampling")
def test_sampling_strategy_float_error_not_binary():
    # a float sampling strategy is only defined for binary targets
    target = np.array([1] * 50 + [2] * 100 + [3] * 25)
    with pytest.raises(ValueError, match="the type of target is binary"):
        check_sampling_strategy(0.5, target, "under-sampling")
@pytest.mark.parametrize("sampling_method", ["over-sampling", "under-sampling"])
def test_sampling_strategy_list_error_not_clean_sampling(sampling_method):
    # a list of classes is only meaningful for cleaning methods
    target = np.array([1] * 50 + [2] * 100 + [3] * 25)
    with pytest.raises(ValueError, match="cannot be a list for samplers"):
        check_sampling_strategy([1, 2, 3], target, sampling_method)
def _sampling_strategy_func(y):
# this function could create an equal number of samples
target_stats = Counter(y)
n_samples = max(target_stats.values())
return {key: int(n_samples) for key in target_stats.keys()}
# Exhaustive table of (strategy, sampler type) -> expected per-class sample
# counts returned by check_sampling_strategy. Under/clean strategies target
# the minority size (25); over-sampling strategies report samples to ADD
# relative to the majority size (100).
@pytest.mark.parametrize(
    "sampling_strategy, sampling_type, expected_sampling_strategy, target",
    [
        ("auto", "under-sampling", {1: 25, 2: 25}, multiclass_target),
        ("auto", "clean-sampling", {1: 25, 2: 25}, multiclass_target),
        ("auto", "over-sampling", {1: 50, 3: 75}, multiclass_target),
        ("all", "over-sampling", {1: 50, 2: 0, 3: 75}, multiclass_target),
        ("all", "under-sampling", {1: 25, 2: 25, 3: 25}, multiclass_target),
        ("all", "clean-sampling", {1: 25, 2: 25, 3: 25}, multiclass_target),
        ("majority", "under-sampling", {2: 25}, multiclass_target),
        ("majority", "clean-sampling", {2: 25}, multiclass_target),
        ("minority", "over-sampling", {3: 75}, multiclass_target),
        ("not minority", "over-sampling", {1: 50, 2: 0}, multiclass_target),
        ("not minority", "under-sampling", {1: 25, 2: 25}, multiclass_target),
        ("not minority", "clean-sampling", {1: 25, 2: 25}, multiclass_target),
        ("not majority", "over-sampling", {1: 50, 3: 75}, multiclass_target),
        ("not majority", "under-sampling", {1: 25, 3: 25}, multiclass_target),
        ("not majority", "clean-sampling", {1: 25, 3: 25}, multiclass_target),
        (
            {1: 70, 2: 100, 3: 70},
            "over-sampling",
            {1: 20, 2: 0, 3: 45},
            multiclass_target,
        ),
        (
            {1: 30, 2: 45, 3: 25},
            "under-sampling",
            {1: 30, 2: 45, 3: 25},
            multiclass_target,
        ),
        ([1], "clean-sampling", {1: 25}, multiclass_target),
        (
            _sampling_strategy_func,
            "over-sampling",
            {1: 50, 2: 0, 3: 75},
            multiclass_target,
        ),
        (0.5, "over-sampling", {1: 25}, binary_target),
        (0.5, "under-sampling", {0: 50}, binary_target),
    ],
)
def test_check_sampling_strategy(
    sampling_strategy, sampling_type, expected_sampling_strategy, target
):
    sampling_strategy_ = check_sampling_strategy(
        sampling_strategy, target, sampling_type
    )
    assert sampling_strategy_ == expected_sampling_strategy
def test_sampling_strategy_callable_args():
    y = np.array([1] * 50 + [2] * 100 + [3] * 25)
    multiplier = {1: 1.5, 2: 1, 3: 3}

    def sampling_strategy_func(y, multiplier):
        """samples such that each class will be affected by the multiplier."""
        target_stats = Counter(y)
        return {
            key: int(values * multiplier[key]) for key, values in target_stats.items()
        }

    # extra kwargs are forwarded to the user-provided callable; the result
    # reports the number of samples to ADD per class for over-sampling
    sampling_strategy_ = check_sampling_strategy(
        sampling_strategy_func, y, "over-sampling", multiplier=multiplier
    )
    assert sampling_strategy_ == {1: 25, 2: 0, 3: 50}
@pytest.mark.parametrize(
    "sampling_strategy, sampling_type, expected_result",
    [
        (
            {3: 25, 1: 25, 2: 25},
            "under-sampling",
            OrderedDict({1: 25, 2: 25, 3: 25}),
        ),
        (
            {3: 100, 1: 100, 2: 100},
            "over-sampling",
            OrderedDict({1: 50, 2: 0, 3: 75}),
        ),
    ],
)
def test_sampling_strategy_check_order(
    sampling_strategy, sampling_type, expected_result
):
    # We pass on purpose a non sorted dictionary and check that the resulting
    # dictionary is sorted. Refer to issue #428.
    y = np.array([1] * 50 + [2] * 100 + [3] * 25)
    sampling_strategy_ = check_sampling_strategy(sampling_strategy, y, sampling_type)
    assert sampling_strategy_ == expected_result
def test_arrays_transformer_plain_list():
    X = np.array([[0, 0], [1, 1]])
    y = np.array([[0, 0], [1, 1]])

    # initialized from lists, the transformer converts arrays back to lists
    transformer = ArraysTransformer(X.tolist(), y.tolist())
    X_res, y_res = transformer.transform(X, y)
    assert isinstance(X_res, list)
    assert isinstance(y_res, list)
def test_arrays_transformer_numpy():
    X = np.array([[0, 0], [1, 1]])
    y = np.array([[0, 0], [1, 1]])

    # initialized from ndarrays, the transformer keeps ndarrays
    transformer = ArraysTransformer(X, y)
    X_res, y_res = transformer.transform(X, y)
    assert isinstance(X_res, np.ndarray)
    assert isinstance(y_res, np.ndarray)
def test_arrays_transformer_pandas():
    pd = pytest.importorskip("pandas")

    X = np.array([[0, 0], [1, 1]])
    y = np.array([0, 1])

    X_df = pd.DataFrame(X, columns=["a", "b"])
    X_df = X_df.astype(int)
    y_df = pd.DataFrame(y, columns=["target"])
    y_df = y_df.astype(int)
    y_s = pd.Series(y, name="target", dtype=int)

    # DataFrame and DataFrame case: columns and dtypes must round-trip
    arrays_transformer = ArraysTransformer(X_df, y_df)
    X_res, y_res = arrays_transformer.transform(X, y)
    assert isinstance(X_res, pd.DataFrame)
    assert_array_equal(X_res.columns, X_df.columns)
    assert_array_equal(X_res.dtypes, X_df.dtypes)
    assert isinstance(y_res, pd.DataFrame)
    assert_array_equal(y_res.columns, y_df.columns)
    assert_array_equal(y_res.dtypes, y_df.dtypes)

    # DataFrames and Series case: the Series name and dtype must round-trip
    arrays_transformer = ArraysTransformer(X_df, y_s)
    _, y_res = arrays_transformer.transform(X, y)
    assert isinstance(y_res, pd.Series)
    assert_array_equal(y_res.name, y_s.name)
    assert_array_equal(y_res.dtype, y_s.dtype)
def test_deprecate_positional_args_warns_for_function():
    # passing keyword-only arguments positionally warns, naming each offender
    @_deprecate_positional_args
    def f1(a, b, *, c=1, d=1):
        pass

    with pytest.warns(FutureWarning, match=r"Pass c=3 as keyword args"):
        f1(1, 2, 3)

    with pytest.warns(FutureWarning, match=r"Pass c=3, d=4 as keyword args"):
        f1(1, 2, 3, 4)

    # works when the positional section itself has defaults
    @_deprecate_positional_args
    def f2(a=1, *, b=1, c=1, d=1):
        pass

    with pytest.warns(FutureWarning, match=r"Pass b=2 as keyword args"):
        f2(1, 2)

    # The * is place before a keyword only argument without a default value
    @_deprecate_positional_args
    def f3(a, *, b, c=1, d=1):
        pass

    with pytest.warns(FutureWarning, match=r"Pass b=2 as keyword args"):
        f3(1, 2)
# A KNN estimator is recognized as a neighbors object; a clusterer is not.
@pytest.mark.parametrize(
    "estimator, is_neighbor_estimator", [(NearestNeighbors(), True), (KMeans(), False)]
)
def test_is_neighbors_object(estimator, is_neighbor_estimator):
    result = _is_neighbors_object(estimator)
    assert result == is_neighbor_estimator
| 13,629 | 34.587467 | 90 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_min_dependencies.py | """Tests for the minimum dependencies in the README.rst file."""
import os
import platform
import re
from pathlib import Path
import pytest
from sklearn.utils.fixes import parse_version
import imblearn
from imblearn._min_dependencies import dependent_packages
@pytest.mark.skipif(
    platform.system() == "Windows", reason="This test is enough on unix system"
)
def test_min_dependencies_readme():
    # Test that the minimum dependencies in the README.rst file are
    # consistent with the minimum dependencies defined at the file:
    # imblearn/_min_dependencies.py

    # Matches RST substitutions such as
    # ".. |PackageMinVersion| replace:: X.Y[.Z]";
    # group(2) captures the package name, group(5) the version.
    pattern = re.compile(
        r"(\.\. \|)"
        + r"(([A-Za-z]+\-?)+)"
        + r"(MinVersion\| replace::)"
        + r"( [0-9]+\.[0-9]+(\.[0-9]+)?)"
    )

    readme_path = Path(imblearn.__path__[0]).parents[0]
    readme_file = readme_path / "README.rst"

    if not os.path.exists(readme_file):
        # Skip the test if the README.rst file is not available.
        # For instance, when installing scikit-learn from wheels
        pytest.skip("The README.rst file is not available.")

    with readme_file.open("r") as f:
        for line in f:
            matched = pattern.match(line)
            if not matched:
                continue

            package, version = matched.group(2), matched.group(5)
            package = package.lower()

            if package in dependent_packages:
                version = parse_version(version)
                min_version = parse_version(dependent_packages[package][0])

                assert version == min_version, f"{package} has a mismatched version"
| 1,594 | 29.673077 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/utils/tests/test_testing.py | """Test for the testing module"""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
from sklearn.neighbors._base import KNeighborsMixin
from imblearn.base import SamplerMixin
from imblearn.utils.testing import _CustomNearestNeighbors, all_estimators
def test_all_estimators():
    # check if the filtering is working with a list or a single string
    type_filter = "sampler"
    all_estimators(type_filter=type_filter)
    type_filter = ["sampler"]
    estimators = all_estimators(type_filter=type_filter)
    for estimator in estimators:
        # check that all estimators are sampler
        # (each entry is a (name, class) tuple)
        assert issubclass(estimator[1], SamplerMixin)

    # check that an error is raised when the type is unknown
    type_filter = "rnd"
    with pytest.raises(ValueError, match="Parameter type_filter must be 'sampler'"):
        all_estimators(type_filter=type_filter)
def test_custom_nearest_neighbors():
    """Check that our custom nearest neighbors can be used for our internal
    duck-typing."""
    neareat_neighbors = _CustomNearestNeighbors(n_neighbors=3)

    # duck-typed: exposes the KNN API without inheriting KNeighborsMixin
    assert not isinstance(neareat_neighbors, KNeighborsMixin)
    assert hasattr(neareat_neighbors, "kneighbors")
    assert hasattr(neareat_neighbors, "kneighbors_graph")

    rng = np.random.RandomState(42)
    X = rng.randn(150, 3)
    y = rng.randint(0, 2, 150)
    neareat_neighbors.fit(X, y)
    distances, indices = neareat_neighbors.kneighbors(X)
    assert distances.shape == (150, 3)
    assert indices.shape == (150, 3)
    # querying the training set itself: each point is its own 1st neighbor
    np.testing.assert_allclose(distances[:, 0], 0.0)
    np.testing.assert_allclose(indices[:, 0], np.arange(150))
| 1,692 | 32.86 | 84 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/_forest.py | """Forest classifiers trained on balanced boostrasp samples."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# License: MIT
import numbers
from copy import deepcopy
from warnings import warn
import numpy as np
import sklearn
from numpy import float32 as DTYPE
from numpy import float64 as DOUBLE
from scipy.sparse import issparse
from sklearn.base import clone, is_classifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble._base import _set_random_states
from sklearn.ensemble._forest import (
_generate_unsampled_indices,
_get_n_samples_bootstrap,
_parallel_build_trees,
)
from sklearn.exceptions import DataConversionWarning
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import _safe_indexing, check_random_state, parse_version
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _check_sample_weight
try:
# scikit-learn >= 1.2
from sklearn.utils.parallel import Parallel, delayed
except (ImportError, ModuleNotFoundError):
from joblib import Parallel
from sklearn.utils.fixes import delayed
from ..base import _ParamsValidationMixin
from ..pipeline import make_pipeline
from ..under_sampling import RandomUnderSampler
from ..utils import Substitution
from ..utils._docstring import _n_jobs_docstring, _random_state_docstring
from ..utils._param_validation import Hidden, Interval, StrOptions
from ..utils._validation import check_sampling_strategy
from ..utils.fixes import _fit_context
from ._common import _random_forest_classifier_parameter_constraints
# Upper bound for the per-estimator random seeds drawn from the random state.
MAX_INT = np.iinfo(np.int32).max
# Parsed scikit-learn version, used to select version-dependent code paths.
sklearn_version = parse_version(sklearn.__version__)
def _local_parallel_build_trees(
    sampler,
    tree,
    bootstrap,
    X,
    y,
    sample_weight,
    tree_idx,
    n_trees,
    verbose=0,
    class_weight=None,
    n_samples_bootstrap=None,
    forest=None,
):
    """Resample ``(X, y)`` with ``sampler`` and fit ``tree`` on the result.

    Thin wrapper around scikit-learn's ``_parallel_build_trees`` that first
    balances the training set, realigns ``sample_weight`` with the samples
    kept by the sampler, and dispatches to the signature matching the
    installed scikit-learn version.

    Returns
    -------
    sampler, tree
        The fitted sampler and the fitted tree.
    """
    # resample before to fit the tree
    X_resampled, y_resampled = sampler.fit_resample(X, y)
    if sample_weight is not None:
        # keep only the weights of the samples selected by the sampler
        sample_weight = _safe_indexing(sample_weight, sampler.sample_indices_)
    # Bug fix: the guard must test the *value* ``n_samples_bootstrap`` (which
    # defaults to None), not the imported function ``_get_n_samples_bootstrap``
    # which is always non-None; the previous test made ``min(None, ...)``
    # raise a TypeError whenever no bootstrap size was provided.
    if n_samples_bootstrap is not None:
        # the bootstrap size cannot exceed the size of the resampled dataset
        n_samples_bootstrap = min(n_samples_bootstrap, X_resampled.shape[0])
    if sklearn_version >= parse_version("1.1"):
        tree = _parallel_build_trees(
            tree,
            bootstrap,
            X_resampled,
            y_resampled,
            sample_weight,
            tree_idx,
            n_trees,
            verbose=verbose,
            class_weight=class_weight,
            n_samples_bootstrap=n_samples_bootstrap,
        )
    else:
        # TODO: remove when the minimum version of scikit-learn supported is 1.1
        # scikit-learn < 1.1 expected the forest object where >= 1.1 expects
        # the ``bootstrap`` flag
        tree = _parallel_build_trees(
            tree,
            forest,
            X_resampled,
            y_resampled,
            sample_weight,
            tree_idx,
            n_trees,
            verbose=verbose,
            class_weight=class_weight,
            n_samples_bootstrap=n_samples_bootstrap,
        )
    return sampler, tree
@Substitution(
    n_jobs=_n_jobs_docstring,
    random_state=_random_state_docstring,
)
class BalancedRandomForestClassifier(_ParamsValidationMixin, RandomForestClassifier):
    """A balanced random forest classifier.

    A balanced random forest differs from a classical random forest by the
    fact that it will draw a bootstrap sample from the minority class and
    sample with replacement the same number of samples from the majority
    class.

    Read more in the :ref:`User Guide <forest>`.

    .. versionadded:: 0.4

    Parameters
    ----------
    n_estimators : int, default=100
        The number of trees in the forest.

    criterion : {{"gini", "entropy"}}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node:

        - If int, then consider ``min_samples_leaf`` as the minimum number.
        - If float, then ``min_samples_leaf`` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : {{"auto", "sqrt", "log2"}}, int, float, or None, \
            default="sqrt"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_leaf_nodes : int, default=None
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

    bootstrap : bool, default=True
        Whether bootstrap samples are used when building trees.

        .. versionchanged:: 0.13
            The default of `bootstrap` will change from `True` to `False` in
            version 0.13. Bootstrapping is already taken care by the internal
            sampler using `replacement=True`. This implementation follows the
            algorithm proposed in [1]_.

    oob_score : bool, default=False
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.

    sampling_strategy : float, str, dict, callable, default="auto"
        Sampling information to sample the data set.

        - When ``float``, it corresponds to the desired ratio of the number of
          samples in the minority class over the number of samples in the
          majority class after resampling. Therefore, the ratio is expressed as
          :math:`\\alpha_{{us}} = N_{{m}} / N_{{rM}}` where :math:`N_{{m}}` is the
          number of samples in the minority class and
          :math:`N_{{rM}}` is the number of samples in the majority class
          after resampling.

          .. warning::
             ``float`` is only available for **binary** classification. An
             error is raised for multi-class classification.

        - When ``str``, specify the class targeted by the resampling. The
          number of samples in the different classes will be equalized.
          Possible choices are:

            ``'majority'``: resample only the majority class;

            ``'not minority'``: resample all classes but the minority class;

            ``'not majority'``: resample all classes but the majority class;

            ``'all'``: resample all classes;

            ``'auto'``: equivalent to ``'not minority'``.

        - When ``dict``, the keys correspond to the targeted classes. The
          values correspond to the desired number of samples for each targeted
          class.

        - When callable, function taking ``y`` and returns a ``dict``. The keys
          correspond to the targeted classes. The values correspond to the
          desired number of samples for each class.

        .. versionchanged:: 0.11
            The default of `sampling_strategy` will change from `"auto"` to
            `"all"` in version 0.13. This forces to use a bootstrap of the
            minority class as proposed in [1]_.

    replacement : bool, default=False
        Whether or not to sample randomly with replacement or not.

        .. versionchanged:: 0.11
            The default of `replacement` will change from `False` to `True` in
            version 0.13. This forces to use a bootstrap of the
            minority class and draw with replacement as proposed in [1]_.

    {n_jobs}

    {random_state}

    verbose : int, default=0
        Controls the verbosity of the tree building process.

    warm_start : bool, default=False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    class_weight : dict, list of dicts, {{"balanced", "balanced_subsample"}}, \
            default=None
        Weights associated with classes in the form dictionary with the key
        being the class_label and the value the weight.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{{0: 1, 1: 1}}, {{0: 1, 1: 5}}, {{0: 1, 1: 1}}, {{0: 1, 1: 1}}]
        instead of [{{1:1}}, {{2:5}}, {{3:1}}, {{4:1}}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed.

        .. versionadded:: 0.6
           Added in `scikit-learn` in 0.22

    max_samples : int or float, default=None
        If bootstrap is True, the number of samples to draw from X
        to train each base estimator.

        - If None (default), then draw `X.shape[0]` samples.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples. Thus,
          `max_samples` should be in the interval `(0, 1)`.

        Be aware that the final number samples used will be the minimum between
        the number of samples given in `max_samples` and the number of samples
        obtained after resampling.

        .. versionadded:: 0.6
           Added in `scikit-learn` in 0.22

    Attributes
    ----------
    estimator_ : :class:`~sklearn.tree.DecisionTreeClassifier` instance
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. versionadded:: 0.10

    base_estimator_ : :class:`~sklearn.tree.DecisionTreeClassifier` instance
        The child estimator template used to create the collection of fitted
        sub-estimators.

        .. deprecated:: 1.2
            `base_estimator_` is deprecated in `scikit-learn` 1.2 and will be
            removed in 1.4. Use `estimator_` instead. When the minimum version
            of `scikit-learn` supported by `imbalanced-learn` will reach 1.4,
            this attribute will be removed.

    estimators_ : list of :class:`~sklearn.tree.DecisionTreeClassifier`
        The collection of fitted sub-estimators.

    base_sampler_ : :class:`~imblearn.under_sampling.RandomUnderSampler`
        The base sampler used to construct the subsequent list of samplers.

    samplers_ : list of :class:`~imblearn.under_sampling.RandomUnderSampler`
        The collection of fitted samplers.

    pipelines_ : list of Pipeline.
        The collection of fitted pipelines (samplers + trees).

    classes_ : ndarray of shape (n_classes,) or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    n_features_ : int
        The number of features when `fit` is performed.

        .. deprecated:: 1.0
           `n_features_` is deprecated in `scikit-learn` 1.0 and will be removed
           in version 1.2. When the minimum version of `scikit-learn` supported
           by `imbalanced-learn` will reach 1.2, this attribute will be removed.

    n_features_in_ : int
        Number of features in the input dataset.

        .. versionadded:: 0.9

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during `fit`. Defined only when `X` has feature
        names that are all strings.

        .. versionadded:: 0.9

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    feature_importances_ : ndarray of shape (n_features,)
        The feature importances (the higher, the more important the feature).

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : ndarray of shape (n_samples, n_classes)
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    See Also
    --------
    BalancedBaggingClassifier : Bagging classifier for which each base
        estimator is trained on a balanced bootstrap.

    EasyEnsembleClassifier : Ensemble of AdaBoost classifier trained on
        balanced bootstraps.

    RUSBoostClassifier : AdaBoost classifier were each bootstrap is balanced
        using random-under sampling at each round of boosting.

    References
    ----------
    .. [1] Chen, Chao, Andy Liaw, and Leo Breiman. "Using random forest to
       learn imbalanced data." University of California, Berkeley 110 (2004):
       1-12.

    Examples
    --------
    >>> from imblearn.ensemble import BalancedRandomForestClassifier
    >>> from sklearn.datasets import make_classification
    >>>
    >>> X, y = make_classification(n_samples=1000, n_classes=3,
    ...                            n_informative=4, weights=[0.2, 0.3, 0.5],
    ...                            random_state=0)
    >>> clf = BalancedRandomForestClassifier(
    ...     sampling_strategy="all", replacement=True, max_depth=2, random_state=0,
    ...     bootstrap=False)
    >>> clf.fit(X, y)
    BalancedRandomForestClassifier(...)
    >>> print(clf.feature_importances_)
    [...]
    >>> print(clf.predict([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    ...                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
    [1]
    """

    # make a deepcopy to not modify the original dictionary
    if sklearn_version >= parse_version("1.3"):
        _parameter_constraints = deepcopy(RandomForestClassifier._parameter_constraints)
    else:
        # scikit-learn < 1.3 does not expose the constraints on the class;
        # fall back to the vendored copy shipped in ``_common``
        _parameter_constraints = deepcopy(
            _random_forest_classifier_parameter_constraints
        )
    # extend the parent's constraints with the sampler-specific parameters;
    # the hidden "warn" sentinel marks the pending 0.13 default changes
    _parameter_constraints.update(
        {
            "bootstrap": ["boolean", Hidden(StrOptions({"warn"}))],
            "sampling_strategy": [
                Interval(numbers.Real, 0, 1, closed="right"),
                StrOptions({"auto", "majority", "not minority", "not majority", "all"}),
                dict,
                callable,
                Hidden(StrOptions({"warn"})),
            ],
            "replacement": ["boolean", Hidden(StrOptions({"warn"}))],
        }
    )

    def __init__(
        self,
        n_estimators=100,
        *,
        criterion="gini",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="sqrt",
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        bootstrap="warn",
        oob_score=False,
        sampling_strategy="warn",
        replacement="warn",
        n_jobs=None,
        random_state=None,
        verbose=0,
        warm_start=False,
        class_weight=None,
        ccp_alpha=0.0,
        max_samples=None,
    ):
        super().__init__(
            criterion=criterion,
            max_depth=max_depth,
            n_estimators=n_estimators,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_impurity_decrease=min_impurity_decrease,
            ccp_alpha=ccp_alpha,
            max_samples=max_samples,
        )
        # sampler-specific parameters, unknown to the parent classifier
        self.sampling_strategy = sampling_strategy
        self.replacement = replacement

    def _validate_estimator(self, default=DecisionTreeClassifier()):
        """Check the estimator and the n_estimator attribute, set the
        `estimator_` attribute."""
        # `estimator` replaced `base_estimator` in recent scikit-learn
        # releases; support both spellings
        if hasattr(self, "estimator"):
            base_estimator = self.estimator
        else:
            base_estimator = self.base_estimator
        if base_estimator is not None:
            self._estimator = clone(base_estimator)
        else:
            self._estimator = clone(default)
        try:
            # scikit-learn < 1.2
            self.base_estimator_ = self._estimator
        except AttributeError:
            # on newer scikit-learn `base_estimator_` is a deprecated
            # read-only property, so the assignment is rejected
            pass
        # template sampler, cloned once per tree in `_make_sampler_estimator`
        self.base_sampler_ = RandomUnderSampler(
            sampling_strategy=self._sampling_strategy,
            replacement=self._replacement,
        )

    def _make_sampler_estimator(self, random_state=None):
        """Make and configure a copy of the `base_estimator_` attribute.

        Warning: This method should be used to properly instantiate new
        sub-estimators.
        """
        estimator = clone(self._estimator)
        estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
        sampler = clone(self.base_sampler_)
        # seed both the tree and the sampler from the shared random state so
        # runs are reproducible
        if random_state is not None:
            _set_random_states(estimator, random_state)
            _set_random_states(sampler, random_state)
        return estimator, sampler

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, its dtype will be converted
            to ``dtype=np.float32``. If a sparse matrix is provided, it will be
            converted into a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,)
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.

        Returns
        -------
        self : object
            The fitted instance.
        """
        self._validate_params()
        # TODO: remove in 0.13
        # resolve the "warn" sentinels into the current defaults, emitting the
        # deprecation warnings about the upcoming 0.13 behaviour change
        if self.sampling_strategy == "warn":
            warn(
                "The default of `sampling_strategy` will change from `'auto'` to "
                "`'all'` in version 0.13. This change will follow the implementation "
                "proposed in the original paper. Set to `'all'` to silence this "
                "warning and adopt the future behaviour.",
                FutureWarning,
            )
            self._sampling_strategy = "auto"
        else:
            self._sampling_strategy = self.sampling_strategy
        if self.replacement == "warn":
            warn(
                "The default of `replacement` will change from `False` to "
                "`True` in version 0.13. This change will follow the implementation "
                "proposed in the original paper. Set to `True` to silence this "
                "warning and adopt the future behaviour.",
                FutureWarning,
            )
            self._replacement = False
        else:
            self._replacement = self.replacement
        if self.bootstrap == "warn":
            warn(
                "The default of `bootstrap` will change from `True` to "
                "`False` in version 0.13. This change will follow the implementation "
                "proposed in the original paper. Set to `False` to silence this "
                "warning and adopt the future behaviour.",
                FutureWarning,
            )
            self._bootstrap = True
        else:
            self._bootstrap = self.bootstrap
        # Validate or convert input data
        if issparse(y):
            raise ValueError("sparse multilabel-indicator for y is not supported.")
        X, y = self._validate_data(
            X, y, multi_output=True, accept_sparse="csc", dtype=DTYPE
        )
        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X)
        self._n_features = X.shape[1]
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn(
                "A column-vector y was passed when a 1d array was"
                " expected. Please change the shape of y to "
                "(n_samples,), for example using ravel().",
                DataConversionWarning,
                stacklevel=2,
            )
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y_encoded, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y_encoded = np.ascontiguousarray(y_encoded, dtype=DOUBLE)
        # when the strategy is a dict keyed by class labels, re-key it with
        # the encoded class indices used internally by the trees
        if isinstance(self._sampling_strategy, dict):
            self._sampling_strategy = {
                np.where(self.classes_[0] == key)[0][0]: value
                for key, value in check_sampling_strategy(
                    self.sampling_strategy,
                    y,
                    "under-sampling",
                ).items()
            }
        else:
            self._sampling_strategy = self._sampling_strategy
        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Get bootstrap sample size
        n_samples_bootstrap = _get_n_samples_bootstrap(
            n_samples=X.shape[0], max_samples=self.max_samples
        )
        # Check parameters
        self._validate_estimator()
        if not self._bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start or not hasattr(self, "estimators_"):
            # Free allocated memory, if any
            self.estimators_ = []
            self.samplers_ = []
            self.pipelines_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError(
                "n_estimators=%d must be larger or equal to "
                "len(estimators_)=%d when warm_start==True"
                % (self.n_estimators, len(self.estimators_))
            )
        elif n_more_estimators == 0:
            warn(
                "Warm-start fitting without increasing n_estimators does not "
                "fit new trees."
            )
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = []
            samplers = []
            for _ in range(n_more_estimators):
                tree, sampler = self._make_sampler_estimator(random_state=random_state)
                trees.append(tree)
                samplers.append(sampler)
            # Parallel loop: we prefer the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading more efficient than multiprocessing in
            # that case. However, we respect any parallel_backend contexts set
            # at a higher level, since correctness does not rely on using
            # threads.
            samplers_trees = Parallel(
                n_jobs=self.n_jobs,
                verbose=self.verbose,
                prefer="threads",
            )(
                delayed(_local_parallel_build_trees)(
                    s,
                    t,
                    self._bootstrap,
                    X,
                    y_encoded,
                    sample_weight,
                    i,
                    len(trees),
                    verbose=self.verbose,
                    class_weight=self.class_weight,
                    n_samples_bootstrap=n_samples_bootstrap,
                    forest=self,
                )
                for i, (s, t) in enumerate(zip(samplers, trees))
            )
            samplers, trees = zip(*samplers_trees)
            # Collect newly grown trees
            self.estimators_.extend(trees)
            self.samplers_.extend(samplers)
            # Create pipeline with the fitted samplers and trees
            self.pipelines_.extend(
                [
                    make_pipeline(deepcopy(s), deepcopy(t))
                    for s, t in zip(samplers, trees)
                ]
            )
        if self.oob_score:
            y_type = type_of_target(y)
            if y_type in ("multiclass-multioutput", "unknown"):
                # FIXME: we could consider to support multiclass-multioutput if
                # we introduce or reuse a constructor parameter (e.g.
                # oob_score) allowing our user to pass a callable defining the
                # scoring strategy on OOB sample.
                raise ValueError(
                    "The type of target cannot be used to compute OOB "
                    f"estimates. Got {y_type} while only the following are "
                    "supported: continuous, continuous-multioutput, binary, "
                    "multiclass, multilabel-indicator."
                )
            self._set_oob_score_and_attributes(X, y_encoded)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self

    def _set_oob_score_and_attributes(self, X, y):
        """Compute and set the OOB score and attributes.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.

        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.
        """
        self.oob_decision_function_ = self._compute_oob_predictions(X, y)
        if self.oob_decision_function_.shape[-1] == 1:
            # drop the n_outputs axis if there is a single output
            self.oob_decision_function_ = self.oob_decision_function_.squeeze(axis=-1)
        from sklearn.metrics import accuracy_score

        # the OOB score is the accuracy of the OOB majority-vote predictions
        self.oob_score_ = accuracy_score(
            y, np.argmax(self.oob_decision_function_, axis=1)
        )

    def _compute_oob_predictions(self, X, y):
        """Compute and set the OOB score.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The data matrix.

        y : ndarray of shape (n_samples, n_outputs)
            The target matrix.

        Returns
        -------
        oob_pred : ndarray of shape (n_samples, n_classes, n_outputs) or \
                (n_samples, 1, n_outputs)
            The OOB predictions.
        """
        # Prediction requires X to be in CSR format
        if issparse(X):
            X = X.tocsr()
        n_samples = y.shape[0]
        n_outputs = self.n_outputs_
        if is_classifier(self) and hasattr(self, "n_classes_"):
            # n_classes_ is a ndarray at this stage
            # all the supported type of target will have the same number of
            # classes in all outputs
            oob_pred_shape = (n_samples, self.n_classes_[0], n_outputs)
        else:
            # for regression, n_classes_ does not exist and we create an empty
            # axis to be consistent with the classification case and make
            # the array operations compatible with the 2 settings
            oob_pred_shape = (n_samples, 1, n_outputs)
        oob_pred = np.zeros(shape=oob_pred_shape, dtype=np.float64)
        n_oob_pred = np.zeros((n_samples, n_outputs), dtype=np.int64)
        for sampler, estimator in zip(self.samplers_, self.estimators_):
            # each tree was fitted on its sampler's resampled subset, so OOB
            # indices are computed relative to that subset
            X_resample = X[sampler.sample_indices_]
            y_resample = y[sampler.sample_indices_]
            n_sample_subset = y_resample.shape[0]
            n_samples_bootstrap = _get_n_samples_bootstrap(
                n_sample_subset, self.max_samples
            )
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_sample_subset, n_samples_bootstrap
            )
            y_pred = self._get_oob_predictions(
                estimator, X_resample[unsampled_indices, :]
            )
            # map subset-relative OOB indices back to the original dataset
            indices = sampler.sample_indices_[unsampled_indices]
            oob_pred[indices, ...] += y_pred
            n_oob_pred[indices, :] += 1
        for k in range(n_outputs):
            if (n_oob_pred == 0).any():
                warn(
                    "Some inputs do not have OOB scores. This probably means "
                    "too few trees were used to compute any reliable OOB "
                    "estimates.",
                    UserWarning,
                )
                # avoid division by zero for samples that were never OOB
                n_oob_pred[n_oob_pred == 0] = 1
            oob_pred[..., k] /= n_oob_pred[..., [k]]
        return oob_pred

    # TODO: remove when supporting scikit-learn>=1.4
    @property
    def estimator_(self):
        """Estimator used to grow the ensemble."""
        return self._estimator

    # TODO: remove when supporting scikit-learn>=1.2
    @property
    def n_features_(self):
        """Number of features when ``fit`` is performed."""
        warn(
            "`n_features_` was deprecated in scikit-learn 1.0. This attribute will "
            "not be accessible when the minimum supported version of scikit-learn "
            "is 1.2.",
            FutureWarning,
        )
        return self.n_features_in_

    def _more_tags(self):
        # multi-output/multi-label targets are rejected in `fit`
        return {
            "multioutput": False,
            "multilabel": False,
        }
| 33,610 | 37.677791 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/_bagging.py | """Bagging classifier trained on balanced bootstrap samples."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import copy
import inspect
import numbers
import warnings
import numpy as np
import sklearn
from sklearn.base import clone
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble._bagging import _parallel_decision_function
from sklearn.ensemble._base import _partition_estimators
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import parse_version
from sklearn.utils.validation import check_is_fitted
try:
# scikit-learn >= 1.2
from sklearn.utils.parallel import Parallel, delayed
except (ImportError, ModuleNotFoundError):
from joblib import Parallel
from sklearn.utils.fixes import delayed
from ..base import _ParamsValidationMixin
from ..pipeline import Pipeline
from ..under_sampling import RandomUnderSampler
from ..under_sampling.base import BaseUnderSampler
from ..utils import Substitution, check_sampling_strategy, check_target_type
from ..utils._available_if import available_if
from ..utils._docstring import _n_jobs_docstring, _random_state_docstring
from ..utils._param_validation import HasMethods, Interval, StrOptions
from ..utils.fixes import _fit_context
from ._common import _bagging_parameter_constraints, _estimator_has
# Parsed scikit-learn version, used to select version-dependent code paths.
sklearn_version = parse_version(sklearn.__version__)
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class BalancedBaggingClassifier(_ParamsValidationMixin, BaggingClassifier):
"""A Bagging classifier with additional balancing.
This implementation of Bagging is similar to the scikit-learn
implementation. It includes an additional step to balance the training set
at fit time using a given sampler.
This classifier can serves as a basis to implement various methods such as
Exactly Balanced Bagging [6]_, Roughly Balanced Bagging [7]_,
Over-Bagging [6]_, or SMOTE-Bagging [8]_.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
estimator : estimator object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
.. versionadded:: 0.10
n_estimators : int, default=10
The number of base estimators in the ensemble.
max_samples : int or float, default=1.0
The number of samples to draw from X to train each base estimator.
- If int, then draw ``max_samples`` samples.
- If float, then draw ``max_samples * X.shape[0]`` samples.
max_features : int or float, default=1.0
The number of features to draw from X to train each base estimator.
- If int, then draw ``max_features`` features.
- If float, then draw ``max_features * X.shape[1]`` features.
bootstrap : bool, default=True
Whether samples are drawn with replacement.
.. note::
Note that this bootstrap will be generated from the resampled
dataset.
bootstrap_features : bool, default=False
Whether features are drawn with replacement.
oob_score : bool, default=False
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
{sampling_strategy}
replacement : bool, default=False
Whether or not to randomly sample with replacement or not when
`sampler is None`, corresponding to a
:class:`~imblearn.under_sampling.RandomUnderSampler`.
{n_jobs}
{random_state}
verbose : int, default=0
Controls the verbosity of the building process.
sampler : sampler object, default=None
The sampler used to balanced the dataset before to bootstrap
(if `bootstrap=True`) and `fit` a base estimator. By default, a
:class:`~imblearn.under_sampling.RandomUnderSampler` is used.
.. versionadded:: 0.8
base_estimator : estimator object, default=None
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
.. deprecated:: 0.10
`base_estimator` was renamed to `estimator` in version 0.10 and
will be removed in 0.12.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 0.10
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
.. deprecated:: 1.2
`base_estimator_` is deprecated in `scikit-learn` 1.2 and will be
removed in 1.4. Use `estimator_` instead. When the minimum version
of `scikit-learn` supported by `imbalanced-learn` will reach 1.4,
this attribute will be removed.
n_features_ : int
The number of features when `fit` is performed.
.. deprecated:: 1.0
`n_features_` is deprecated in `scikit-learn` 1.0 and will be removed
in version 1.2. When the minimum version of `scikit-learn` supported
by `imbalanced-learn` will reach 1.2, this attribute will be removed.
estimators_ : list of estimators
The collection of fitted base estimators.
sampler_ : sampler object
The validate sampler created from the `sampler` parameter.
estimators_samples_ : list of ndarray
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator. Each subset is defined by a boolean mask.
estimators_features_ : list of ndarray
The subset of drawn features for each base estimator.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : ndarray of shape (n_samples, n_classes)
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
``oob_decision_function_`` might contain NaN.
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.9
See Also
--------
BalancedRandomForestClassifier : Random forest applying random-under
sampling to balance the different bootstraps.
EasyEnsembleClassifier : Ensemble of AdaBoost classifier trained on
balanced bootstraps.
RUSBoostClassifier : AdaBoost classifier were each bootstrap is balanced
using random-under sampling at each round of boosting.
Notes
-----
This is possible to turn this classifier into a balanced random forest [5]_
by passing a :class:`~sklearn.tree.DecisionTreeClassifier` with
`max_features='auto'` as a base estimator.
See
:ref:`sphx_glr_auto_examples_ensemble_plot_comparison_ensemble_classifier.py`.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
.. [5] C. Chen Chao, A. Liaw, and L. Breiman. "Using random forest to
learn imbalanced data." University of California, Berkeley 110,
2004.
.. [6] R. Maclin, and D. Opitz. "An empirical evaluation of bagging and
boosting." AAAI/IAAI 1997 (1997): 546-551.
.. [7] S. Hido, H. Kashima, and Y. Takahashi. "Roughly balanced bagging
for imbalanced data." Statistical Analysis and Data Mining: The ASA
Data Science Journal 2.5‐6 (2009): 412-426.
.. [8] S. Wang, and X. Yao. "Diversity analysis on imbalanced data sets by
using ensemble models." 2009 IEEE symposium on computational
intelligence and data mining. IEEE, 2009.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.metrics import confusion_matrix
>>> from imblearn.ensemble import BalancedBaggingClassifier
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> bbc = BalancedBaggingClassifier(random_state=42)
>>> bbc.fit(X_train, y_train)
BalancedBaggingClassifier(...)
>>> y_pred = bbc.predict(X_test)
>>> print(confusion_matrix(y_test, y_pred))
[[ 23 0]
[ 2 225]]
"""
    # make a deepcopy to not modify the original dictionary
    # Start from the scikit-learn `BaggingClassifier` constraints (the public
    # attribute exists from scikit-learn 1.3; before that we use a vendored
    # copy) and extend them with the imbalanced-learn specific parameters.
    if sklearn_version >= parse_version("1.3"):
        _parameter_constraints = copy.deepcopy(BaggingClassifier._parameter_constraints)
    else:
        _parameter_constraints = copy.deepcopy(_bagging_parameter_constraints)
    _parameter_constraints.update(
        {
            # accepted forms: a float ratio in (0, 1], a named strategy, a
            # per-class dict, or a callable
            "sampling_strategy": [
                Interval(numbers.Real, 0, 1, closed="right"),
                StrOptions({"auto", "majority", "not minority", "not majority", "all"}),
                dict,
                callable,
            ],
            "replacement": ["boolean"],
            # any object exposing `fit_resample`, or None for the default sampler
            "sampler": [HasMethods(["fit_resample"]), None],
        }
    )
    def __init__(
        self,
        estimator=None,
        n_estimators=10,
        *,
        max_samples=1.0,
        max_features=1.0,
        bootstrap=True,
        bootstrap_features=False,
        oob_score=False,
        warm_start=False,
        sampling_strategy="auto",
        replacement=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        sampler=None,
        base_estimator="deprecated",
    ):
        # TODO: remove when supporting scikit-learn>=1.2
        # `BaggingClassifier.__init__` accepts `estimator` only from
        # scikit-learn 1.2 onward; on older versions we forward
        # `base_estimator` and store `estimator` on `self` ourselves so the
        # attribute exists either way.
        bagging_classifier_signature = inspect.signature(super().__init__)
        estimator_params = {"base_estimator": base_estimator}
        if "estimator" in bagging_classifier_signature.parameters:
            estimator_params["estimator"] = estimator
        else:
            self.estimator = estimator
        super().__init__(
            **estimator_params,
            n_estimators=n_estimators,
            max_samples=max_samples,
            max_features=max_features,
            bootstrap=bootstrap,
            bootstrap_features=bootstrap_features,
            oob_score=oob_score,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )
        # imbalanced-learn specific parameters (validated later during `fit`)
        self.sampling_strategy = sampling_strategy
        self.replacement = replacement
        self.sampler = sampler
    def _validate_y(self, y):
        """Encode the targets and remap a dict `sampling_strategy` from
        original class labels to their encoded indices.

        The parent class encodes `y`; when the user supplied a per-class dict,
        the keys must be translated to the encoded label space so the sampler
        sees consistent labels. A "bypass" sampler does no resampling, so no
        remapping is needed in that case.
        """
        y_encoded = super()._validate_y(y)
        if (
            isinstance(self.sampling_strategy, dict)
            and self.sampler_._sampling_type != "bypass"
        ):
            self._sampling_strategy = {
                # map original label -> position in `classes_` (encoded label)
                np.where(self.classes_ == key)[0][0]: value
                for key, value in check_sampling_strategy(
                    self.sampling_strategy,
                    y,
                    self.sampler_._sampling_type,
                ).items()
            }
        else:
            self._sampling_strategy = self.sampling_strategy
        return y_encoded
    def _validate_estimator(self, default=DecisionTreeClassifier()):
        """Check the estimator and the n_estimator attribute, set the
        `estimator_` attribute."""
        # `estimator` and the deprecated `base_estimator` are mutually
        # exclusive; raise early if both were provided.
        if self.estimator is not None and (
            self.base_estimator not in [None, "deprecated"]
        ):
            raise ValueError(
                "Both `estimator` and `base_estimator` were set. Only set `estimator`."
            )
        if self.estimator is not None:
            base_estimator = clone(self.estimator)
        elif self.base_estimator not in [None, "deprecated"]:
            warnings.warn(
                "`base_estimator` was renamed to `estimator` in version 0.10 and "
                "will be removed in 0.12.",
                FutureWarning,
            )
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = clone(default)
        if self.sampler_._sampling_type != "bypass":
            self.sampler_.set_params(sampling_strategy=self._sampling_strategy)
        # Each ensemble member is a pipeline: resample first, then fit the
        # classifier on the balanced subset.
        self._estimator = Pipeline(
            [("sampler", self.sampler_), ("classifier", base_estimator)]
        )
        try:
            # scikit-learn < 1.2
            # On newer versions `base_estimator_` is a read-only deprecated
            # property; the assignment then raises AttributeError, which we
            # deliberately ignore.
            self.base_estimator_ = self._estimator
        except AttributeError:
            pass
    # TODO: remove when supporting scikit-learn>=1.4
    # Compatibility shim: exposes the pipeline built in `_validate_estimator`
    # under the name scikit-learn uses for the ensemble template.
    @property
    def estimator_(self):
        """Estimator used to grow the ensemble."""
        return self._estimator
    # TODO: remove when supporting scikit-learn>=1.2
    # Deprecated alias kept for backward compatibility; emits a warning and
    # forwards to `n_features_in_`.
    @property
    def n_features_(self):
        """Number of features when ``fit`` is performed."""
        warnings.warn(
            "`n_features_` was deprecated in scikit-learn 1.0. This attribute will "
            "not be accessible when the minimum supported version of scikit-learn "
            "is 1.2.",
            FutureWarning,
        )
        return self.n_features_in_
    @_fit_context(prefer_skip_nested_validation=False)
    def fit(self, X, y):
        """Build a Bagging ensemble of estimators from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).
        Returns
        -------
        self : object
            Fitted estimator.
        """
        # overwrite the base class method by disallowing `sample_weight`
        # (the default sampler cannot honor sample weights; see `_fit`)
        self._validate_params()
        return super().fit(X, y)
    def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        """Validate the sampler, then delegate fitting to the parent class.

        The `max_samples`/`max_depth`/`sample_weight` arguments received here
        are intentionally ignored: `self.max_samples` is used and
        `sample_weight` is forced to None (see comments below).
        """
        check_target_type(y)
        # the sampler needs to be validated before to call _fit because
        # _validate_y is called before _validate_estimator and would require
        # to know which type of sampler we are using.
        if self.sampler is None:
            self.sampler_ = RandomUnderSampler(
                replacement=self.replacement,
            )
        else:
            self.sampler_ = clone(self.sampler)
        # RandomUnderSampler is not supporting sample_weight. We need to pass
        # None.
        return super()._fit(X, y, self.max_samples, sample_weight=None)
    # TODO: remove when minimum supported version of scikit-learn is 1.1
    @available_if(_estimator_has("decision_function"))
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        Returns
        -------
        score : ndarray of shape (n_samples, k)
            The decision function of the input samples. The columns correspond
            to the classes in sorted order, as they appear in the attribute
            ``classes_``. Regression and binary classification are special
            cases with ``k == 1``, otherwise ``k==n_classes``.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )
        # Parallel loop
        # `_partition_estimators` splits the ensemble into contiguous chunks,
        # one per job; each job scores its chunk on its feature subset.
        n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i] : starts[i + 1]],
                self.estimators_features_[starts[i] : starts[i + 1]],
                X,
            )
            for i in range(n_jobs)
        )
        # Reduce
        decisions = sum(all_decisions) / self.n_estimators
        return decisions
def _more_tags(self):
tags = super()._more_tags()
tags_key = "_xfail_checks"
failing_test = "check_estimators_nan_inf"
reason = "Fails because the sampler removed infinity and NaN values"
if tags_key in tags:
tags[tags_key][failing_test] = reason
else:
tags[tags_key] = {failing_test: reason}
return tags
| 17,943 | 35.177419 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/_easy_ensemble.py | """Class to perform under-sampling using easy ensemble."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import copy
import inspect
import numbers
import warnings
import numpy as np
import sklearn
from sklearn.base import clone
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier
from sklearn.ensemble._bagging import _parallel_decision_function
from sklearn.ensemble._base import _partition_estimators
from sklearn.utils import parse_version
from sklearn.utils._tags import _safe_tags
from sklearn.utils.validation import check_is_fitted
try:
# scikit-learn >= 1.2
from sklearn.utils.parallel import Parallel, delayed
except (ImportError, ModuleNotFoundError):
from joblib import Parallel
from sklearn.utils.fixes import delayed
from ..base import _ParamsValidationMixin
from ..pipeline import Pipeline
from ..under_sampling import RandomUnderSampler
from ..under_sampling.base import BaseUnderSampler
from ..utils import Substitution, check_sampling_strategy, check_target_type
from ..utils._available_if import available_if
from ..utils._docstring import _n_jobs_docstring, _random_state_docstring
from ..utils._param_validation import Interval, StrOptions
from ..utils.fixes import _fit_context
from ._common import _bagging_parameter_constraints, _estimator_has
MAX_INT = np.iinfo(np.int32).max
sklearn_version = parse_version(sklearn.__version__)
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
n_jobs=_n_jobs_docstring,
random_state=_random_state_docstring,
)
class EasyEnsembleClassifier(_ParamsValidationMixin, BaggingClassifier):
"""Bag of balanced boosted learners also known as EasyEnsemble.
This algorithm is known as EasyEnsemble [1]_. The classifier is an
ensemble of AdaBoost learners trained on different balanced bootstrap
samples. The balancing is achieved by random under-sampling.
Read more in the :ref:`User Guide <boosting>`.
.. versionadded:: 0.4
Parameters
----------
n_estimators : int, default=10
Number of AdaBoost learners in the ensemble.
estimator : estimator object, default=AdaBoostClassifier()
The base AdaBoost classifier used in the inner ensemble. Note that you
can set the number of inner learner by passing your own instance.
.. versionadded:: 0.10
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
{sampling_strategy}
replacement : bool, default=False
Whether or not to sample randomly with replacement or not.
{n_jobs}
{random_state}
verbose : int, default=0
Controls the verbosity of the building process.
base_estimator : estimator object, default=AdaBoostClassifier()
The base AdaBoost classifier used in the inner ensemble. Note that you
can set the number of inner learner by passing your own instance.
.. deprecated:: 0.10
`base_estimator` was renamed to `estimator` in version 0.10 and will
be removed in 0.12.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 0.10
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
.. deprecated:: 1.2
`base_estimator_` is deprecated in `scikit-learn` 1.2 and will be
removed in 1.4. Use `estimator_` instead. When the minimum version
of `scikit-learn` supported by `imbalanced-learn` will reach 1.4,
this attribute will be removed.
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples for each base estimator.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : array, shape (n_classes,)
The classes labels.
n_classes_ : int or list
The number of classes.
n_features_ : int
The number of features when `fit` is performed.
.. deprecated:: 1.0
`n_features_` is deprecated in `scikit-learn` 1.0 and will be removed
in version 1.2. When the minimum version of `scikit-learn` supported
by `imbalanced-learn` will reach 1.2, this attribute will be removed.
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.9
See Also
--------
BalancedBaggingClassifier : Bagging classifier for which each base
estimator is trained on a balanced bootstrap.
BalancedRandomForestClassifier : Random forest applying random-under
sampling to balance the different bootstraps.
RUSBoostClassifier : AdaBoost classifier were each bootstrap is balanced
using random-under sampling at each round of boosting.
Notes
-----
The method is described in [1]_.
Supports multi-class resampling by sampling each class independently.
References
----------
.. [1] X. Y. Liu, J. Wu and Z. H. Zhou, "Exploratory Undersampling for
Class-Imbalance Learning," in IEEE Transactions on Systems, Man, and
Cybernetics, Part B (Cybernetics), vol. 39, no. 2, pp. 539-550,
April 2009.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from sklearn.model_selection import train_test_split
>>> from sklearn.metrics import confusion_matrix
>>> from imblearn.ensemble import EasyEnsembleClassifier
>>> X, y = make_classification(n_classes=2, class_sep=2,
... weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1, n_samples=1000, random_state=10)
>>> print('Original dataset shape %s' % Counter(y))
Original dataset shape Counter({{1: 900, 0: 100}})
>>> X_train, X_test, y_train, y_test = train_test_split(X, y,
... random_state=0)
>>> eec = EasyEnsembleClassifier(random_state=42)
>>> eec.fit(X_train, y_train)
EasyEnsembleClassifier(...)
>>> y_pred = eec.predict(X_test)
>>> print(confusion_matrix(y_test, y_pred))
[[ 23 0]
[ 2 225]]
"""
    # make a deepcopy to not modify the original dictionary
    # Start from the scikit-learn `BaggingClassifier` constraints (public
    # from scikit-learn 1.3; vendored copy before that).
    if sklearn_version >= parse_version("1.3"):
        _parameter_constraints = copy.deepcopy(BaggingClassifier._parameter_constraints)
    else:
        _parameter_constraints = copy.deepcopy(_bagging_parameter_constraints)
    # These bagging parameters are fixed by the EasyEnsemble algorithm
    # (see the hard-coded values in `__init__`), so they are not exposed.
    excluded_params = {
        "bootstrap",
        "bootstrap_features",
        "max_features",
        "oob_score",
        "max_samples",
    }
    for param in excluded_params:
        _parameter_constraints.pop(param, None)
    _parameter_constraints.update(
        {
            # accepted forms: a float ratio in (0, 1], a named strategy, a
            # per-class dict, or a callable
            "sampling_strategy": [
                Interval(numbers.Real, 0, 1, closed="right"),
                StrOptions({"auto", "majority", "not minority", "not majority", "all"}),
                dict,
                callable,
            ],
            "replacement": ["boolean"],
        }
    )
    def __init__(
        self,
        n_estimators=10,
        estimator=None,
        *,
        warm_start=False,
        sampling_strategy="auto",
        replacement=False,
        n_jobs=None,
        random_state=None,
        verbose=0,
        base_estimator="deprecated",
    ):
        # TODO: remove when supporting scikit-learn>=1.2
        # `BaggingClassifier.__init__` accepts `estimator` only from
        # scikit-learn 1.2 onward; on older versions we forward
        # `base_estimator` and store `estimator` on `self` ourselves.
        bagging_classifier_signature = inspect.signature(super().__init__)
        estimator_params = {"base_estimator": base_estimator}
        if "estimator" in bagging_classifier_signature.parameters:
            estimator_params["estimator"] = estimator
        else:
            self.estimator = estimator
        # The bagging parameters below are fixed: EasyEnsemble draws each
        # balanced subset via the sampler, not via bootstrap/feature sampling.
        super().__init__(
            **estimator_params,
            n_estimators=n_estimators,
            max_samples=1.0,
            max_features=1.0,
            bootstrap=False,
            bootstrap_features=False,
            oob_score=False,
            warm_start=warm_start,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
        )
        self.sampling_strategy = sampling_strategy
        self.replacement = replacement
def _validate_y(self, y):
y_encoded = super()._validate_y(y)
if isinstance(self.sampling_strategy, dict):
self._sampling_strategy = {
np.where(self.classes_ == key)[0][0]: value
for key, value in check_sampling_strategy(
self.sampling_strategy,
y,
"under-sampling",
).items()
}
else:
self._sampling_strategy = self.sampling_strategy
return y_encoded
    def _validate_estimator(self, default=AdaBoostClassifier()):
        """Check the estimator and the n_estimator attribute, set the
        `estimator_` attribute."""
        # `estimator` and the deprecated `base_estimator` are mutually
        # exclusive; raise early if both were provided.
        if self.estimator is not None and (
            self.base_estimator not in [None, "deprecated"]
        ):
            raise ValueError(
                "Both `estimator` and `base_estimator` were set. Only set `estimator`."
            )
        if self.estimator is not None:
            base_estimator = clone(self.estimator)
        elif self.base_estimator not in [None, "deprecated"]:
            warnings.warn(
                "`base_estimator` was renamed to `estimator` in version 0.10 and "
                "will be removed in 0.12.",
                FutureWarning,
            )
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = clone(default)
        # Each ensemble member is a pipeline: random under-sampling first,
        # then an AdaBoost learner fitted on the balanced subset.
        sampler = RandomUnderSampler(
            sampling_strategy=self._sampling_strategy,
            replacement=self.replacement,
        )
        self._estimator = Pipeline(
            [("sampler", sampler), ("classifier", base_estimator)]
        )
        try:
            self.base_estimator_ = self._estimator
        except AttributeError:
            # On scikit-learn >= 1.2 `base_estimator_` is a read-only
            # deprecated property, so the assignment above raises
            # AttributeError; it only succeeds on older versions.
            pass
    # TODO: remove when supporting scikit-learn>=1.4
    # Compatibility shim: exposes the pipeline built in `_validate_estimator`
    # under the name scikit-learn uses for the ensemble template.
    @property
    def estimator_(self):
        """Estimator used to grow the ensemble."""
        return self._estimator
    # TODO: remove when supporting scikit-learn>=1.2
    # Deprecated alias kept for backward compatibility; emits a warning and
    # forwards to `n_features_in_`.
    @property
    def n_features_(self):
        """Number of features when ``fit`` is performed."""
        warnings.warn(
            "`n_features_` was deprecated in scikit-learn 1.0. This attribute will "
            "not be accessible when the minimum supported version of scikit-learn "
            "is 1.2.",
            FutureWarning,
        )
        return self.n_features_in_
    @_fit_context(prefer_skip_nested_validation=False)
    def fit(self, X, y):
        """Build a Bagging ensemble of estimators from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).
        Returns
        -------
        self : object
            Fitted estimator.
        """
        self._validate_params()
        # overwrite the base class method by disallowing `sample_weight`
        # (the internal RandomUnderSampler cannot honor sample weights)
        return super().fit(X, y)
    def _fit(self, X, y, max_samples=None, max_depth=None, sample_weight=None):
        """Check the target type, then delegate fitting to the parent class.

        The arguments received here are intentionally ignored in favor of
        `self.max_samples` and `sample_weight=None` (see comment below).
        """
        check_target_type(y)
        # RandomUnderSampler is not supporting sample_weight. We need to pass
        # None.
        return super()._fit(X, y, self.max_samples, sample_weight=None)
    # TODO: remove when minimum supported version of scikit-learn is 1.1
    @available_if(_estimator_has("decision_function"))
    def decision_function(self, X):
        """Average of the decision functions of the base classifiers.
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrices are accepted only if
            they are supported by the base estimator.
        Returns
        -------
        score : ndarray of shape (n_samples, k)
            The decision function of the input samples. The columns correspond
            to the classes in sorted order, as they appear in the attribute
            ``classes_``. Regression and binary classification are special
            cases with ``k == 1``, otherwise ``k==n_classes``.
        """
        check_is_fitted(self)
        # Check data
        X = self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            dtype=None,
            force_all_finite=False,
            reset=False,
        )
        # Parallel loop
        # `_partition_estimators` splits the ensemble into contiguous chunks,
        # one per job; each job scores its chunk on its feature subset.
        n_jobs, _, starts = _partition_estimators(self.n_estimators, self.n_jobs)
        all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
            delayed(_parallel_decision_function)(
                self.estimators_[starts[i] : starts[i + 1]],
                self.estimators_features_[starts[i] : starts[i + 1]],
                X,
            )
            for i in range(n_jobs)
        )
        # Reduce
        decisions = sum(all_decisions) / self.n_estimators
        return decisions
def _more_tags(self):
if self.estimator is None:
estimator = AdaBoostClassifier()
else:
estimator = self.estimator
return {"allow_nan": _safe_tags(estimator, "allow_nan")}
| 14,078 | 33.507353 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/_weight_boosting.py | import copy
import inspect
import numbers
import warnings
from copy import deepcopy
import numpy as np
import sklearn
from sklearn.base import clone
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble._base import _set_random_states
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import _safe_indexing, parse_version
from sklearn.utils.validation import has_fit_parameter
from ..base import _ParamsValidationMixin
from ..pipeline import make_pipeline
from ..under_sampling import RandomUnderSampler
from ..under_sampling.base import BaseUnderSampler
from ..utils import Substitution, check_target_type
from ..utils._docstring import _random_state_docstring
from ..utils._param_validation import Interval, StrOptions
from ..utils.fixes import _fit_context
from ._common import _adaboost_classifier_parameter_constraints
sklearn_version = parse_version(sklearn.__version__)
@Substitution(
sampling_strategy=BaseUnderSampler._sampling_strategy_docstring,
random_state=_random_state_docstring,
)
class RUSBoostClassifier(_ParamsValidationMixin, AdaBoostClassifier):
"""Random under-sampling integrated in the learning of AdaBoost.
During learning, the problem of class balancing is alleviated by random
under-sampling the sample at each iteration of the boosting algorithm.
Read more in the :ref:`User Guide <boosting>`.
.. versionadded:: 0.4
Parameters
----------
estimator : estimator object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is ``DecisionTreeClassifier(max_depth=1)``.
.. versionadded:: 0.12
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, default=1.0
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {{'SAMME', 'SAMME.R'}}, default='SAMME.R'
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
{sampling_strategy}
replacement : bool, default=False
Whether or not to sample randomly with replacement or not.
{random_state}
base_estimator : estimator object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is ``DecisionTreeClassifier(max_depth=1)``.
.. deprecated:: 0.10
`base_estimator` is deprecated in version 0.10 and will be removed
in 0.12. Use `estimator` instead.
Attributes
----------
estimator_ : estimator
The base estimator from which the ensemble is grown.
.. versionadded:: 0.10
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
.. deprecated:: 1.2
`base_estimator_` is deprecated in `scikit-learn` 1.2 and will be
removed in 1.4. Use `estimator_` instead. When the minimum version
of `scikit-learn` supported by `imbalanced-learn` will reach 1.4,
this attribute will be removed.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
base_sampler_ : :class:`~imblearn.under_sampling.RandomUnderSampler`
The base sampler used to generate the subsequent samplers.
samplers_ : list of :class:`~imblearn.under_sampling.RandomUnderSampler`
The collection of fitted samplers.
pipelines_ : list of Pipeline
The collection of fitted pipelines (samplers + trees).
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : ndarray of shape (n_estimator,)
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of shape (n_estimator,)
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : ndarray of shape (n_features,)
The feature importances if supported by the ``base_estimator``.
n_features_in_ : int
Number of features in the input dataset.
.. versionadded:: 0.9
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during `fit`. Defined only when `X` has feature
names that are all strings.
.. versionadded:: 0.9
See Also
--------
BalancedBaggingClassifier : Bagging classifier for which each base
estimator is trained on a balanced bootstrap.
BalancedRandomForestClassifier : Random forest applying random-under
sampling to balance the different bootstraps.
EasyEnsembleClassifier : Ensemble of AdaBoost classifier trained on
balanced bootstraps.
References
----------
.. [1] Seiffert, C., Khoshgoftaar, T. M., Van Hulse, J., & Napolitano, A.
"RUSBoost: A hybrid approach to alleviating class imbalance." IEEE
Transactions on Systems, Man, and Cybernetics-Part A: Systems and Humans
40.1 (2010): 185-197.
Examples
--------
>>> from imblearn.ensemble import RUSBoostClassifier
>>> from sklearn.datasets import make_classification
>>>
>>> X, y = make_classification(n_samples=1000, n_classes=3,
... n_informative=4, weights=[0.2, 0.3, 0.5],
... random_state=0)
>>> clf = RUSBoostClassifier(random_state=0)
>>> clf.fit(X, y)
RUSBoostClassifier(...)
>>> clf.predict(X)
array([...])
"""
    # make a deepcopy to not modify the original dictionary
    # Start from the scikit-learn `AdaBoostClassifier` constraints (public
    # from scikit-learn 1.3; vendored copy before that) and extend them with
    # the imbalanced-learn specific parameters.
    if sklearn_version >= parse_version("1.3"):
        _parameter_constraints = copy.deepcopy(
            AdaBoostClassifier._parameter_constraints
        )
    else:
        _parameter_constraints = copy.deepcopy(
            _adaboost_classifier_parameter_constraints
        )
    _parameter_constraints.update(
        {
            # accepted forms: a float ratio in (0, 1], a named strategy, a
            # per-class dict, or a callable
            "sampling_strategy": [
                Interval(numbers.Real, 0, 1, closed="right"),
                StrOptions({"auto", "majority", "not minority", "not majority", "all"}),
                dict,
                callable,
            ],
            "replacement": ["boolean"],
        }
    )
def __init__(
self,
estimator=None,
*,
n_estimators=50,
learning_rate=1.0,
algorithm="SAMME.R",
sampling_strategy="auto",
replacement=False,
random_state=None,
base_estimator="deprecated",
):
# TODO: remove when supporting scikit-learn>=1.2
bagging_classifier_signature = inspect.signature(super().__init__)
estimator_params = {"base_estimator": base_estimator}
if "estimator" in bagging_classifier_signature.parameters:
estimator_params["estimator"] = estimator
else:
self.estimator = estimator
super().__init__(
**estimator_params,
n_estimators=n_estimators,
learning_rate=learning_rate,
algorithm=algorithm,
random_state=random_state,
)
self.sampling_strategy = sampling_strategy
self.replacement = replacement
    @_fit_context(prefer_skip_nested_validation=False)
    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier from the training set (X, y).
        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. DOK and LIL are converted to CSR.
        y : array-like of shape (n_samples,)
            The target values (class labels).
        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, the sample weights are initialized to
            ``1 / n_samples``.
        Returns
        -------
        self : object
            Returns self.
        """
        self._validate_params()
        check_target_type(y)
        # fresh containers per fit; they are filled by
        # `_make_sampler_estimator` at each boosting round
        self.samplers_ = []
        self.pipelines_ = []
        super().fit(X, y, sample_weight)
        return self
    def _validate_estimator(self):
        """Check the estimator and the n_estimator attribute.
        Sets the `estimator_` attributes.
        """
        # `estimator` and the deprecated `base_estimator` are mutually
        # exclusive; raise early if both were provided.
        if self.estimator is not None and (
            self.base_estimator not in [None, "deprecated"]
        ):
            raise ValueError(
                "Both `estimator` and `base_estimator` were set. Only set `estimator`."
            )
        default = DecisionTreeClassifier(max_depth=1)
        if self.estimator is not None:
            base_estimator = clone(self.estimator)
        elif self.base_estimator not in [None, "deprecated"]:
            warnings.warn(
                "`base_estimator` was renamed to `estimator` in version 0.10 and "
                "will be removed in 0.12.",
                FutureWarning,
            )
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = clone(default)
        self._estimator = base_estimator
        try:
            # scikit-learn < 1.2
            # On newer versions `base_estimator_` is a read-only deprecated
            # property; the assignment then raises AttributeError, which we
            # deliberately ignore.
            self.base_estimator_ = self._estimator
        except AttributeError:
            pass
        # SAMME-R requires predict_proba-enabled estimators
        if self.algorithm == "SAMME.R":
            if not hasattr(self._estimator, "predict_proba"):
                raise TypeError(
                    "AdaBoostClassifier with algorithm='SAMME.R' requires "
                    "that the weak learner supports the calculation of class "
                    "probabilities with a predict_proba method.\n"
                    "Please change the base estimator or set "
                    "algorithm='SAMME' instead."
                )
        # boosting reweights samples each round, so the weak learner must
        # accept `sample_weight`
        if not has_fit_parameter(self._estimator, "sample_weight"):
            raise ValueError(
                f"{self._estimator.__class__.__name__} doesn't support sample_weight."
            )
        # template sampler cloned (with per-round random state) at each boost
        self.base_sampler_ = RandomUnderSampler(
            sampling_strategy=self.sampling_strategy,
            replacement=self.replacement,
        )
    def _make_sampler_estimator(self, append=True, random_state=None):
        """Make and configure a copy of the `base_estimator_` attribute.
        Warning: This method should be used to properly instantiate new
        sub-estimators.

        Returns a `(estimator, sampler)` pair; when `append` is True they are
        also recorded in `estimators_`, `samplers_` and `pipelines_`.
        """
        estimator = clone(self._estimator)
        estimator.set_params(**{p: getattr(self, p) for p in self.estimator_params})
        sampler = clone(self.base_sampler_)
        if random_state is not None:
            # seed both clones deterministically from the boosting RNG
            _set_random_states(estimator, random_state)
            _set_random_states(sampler, random_state)
        if append:
            self.estimators_.append(estimator)
            self.samplers_.append(sampler)
            # deep copies so the stored pipeline is decoupled from the
            # objects that will actually be fitted this round
            self.pipelines_.append(
                make_pipeline(deepcopy(sampler), deepcopy(estimator))
            )
        return estimator, sampler
    def _boost_real(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost using the SAMME.R real algorithm.

        Unlike plain AdaBoost, the weak learner is fitted on a randomly
        under-sampled subset (with the matching subset of sample weights),
        while the weight update is computed on the full training set.
        """
        estimator, sampler = self._make_sampler_estimator(random_state=random_state)
        X_res, y_res = sampler.fit_resample(X, y)
        sample_weight_res = _safe_indexing(sample_weight, sampler.sample_indices_)
        estimator.fit(X_res, y_res, sample_weight=sample_weight_res)
        y_predict_proba = estimator.predict_proba(X)
        if iboost == 0:
            self.classes_ = getattr(estimator, "classes_", None)
            self.n_classes_ = len(self.classes_)
        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0)
        # Instances incorrectly classified
        incorrect = y_predict != y
        # Error fraction
        estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1.0, 0.0
        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1.0 / (n_classes - 1), 1.0])
        y_coding = y_codes.take(classes == y[:, np.newaxis])
        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        proba = y_predict_proba  # alias for readability
        np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)
        # Boost weight using multi-class AdaBoost SAMME.R alg
        # (note: `y_predict_proba` below is the clipped array, since `proba`
        # aliases it and the clip is performed in place)
        estimator_weight = (
            -1.0
            * self.learning_rate
            * ((n_classes - 1.0) / n_classes)
            * (y_coding * np.log(y_predict_proba)).sum(axis=1)
        )
        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(
                estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))
            )
        return sample_weight, 1.0, estimator_error
    def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost using the SAMME discrete algorithm.

        Same resampling scheme as :meth:`_boost_real`, but the weight update
        uses hard predictions and a per-iteration estimator weight.

        Returns
        -------
        tuple of (sample_weight, estimator_weight, estimator_error)
            ``(None, None, None)`` signals the caller to stop early when the
            estimator is no better than random guessing.
        """
        estimator, sampler = self._make_sampler_estimator(random_state=random_state)
        # Fit on the under-sampled subset with the matching weight slice.
        X_res, y_res = sampler.fit_resample(X, y)
        sample_weight_res = _safe_indexing(sample_weight, sampler.sample_indices_)
        estimator.fit(X_res, y_res, sample_weight=sample_weight_res)
        # Predictions over the full training set drive the weight update.
        y_predict = estimator.predict(X)
        if iboost == 0:
            self.classes_ = getattr(estimator, "classes_", None)
            self.n_classes_ = len(self.classes_)
        # Instances incorrectly classified
        incorrect = y_predict != y
        # Error fraction (weighted by the current sample weights)
        estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1.0, 0.0
        n_classes = self.n_classes_
        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1.0 - (1.0 / n_classes):
            # Drop the just-fitted members so the ensemble stays consistent.
            self.estimators_.pop(-1)
            self.samplers_.pop(-1)
            self.pipelines_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError(
                    "BaseClassifier in AdaBoostClassifier "
                    "ensemble is worse than random, ensemble "
                    "can not be fit."
                )
            return None, None, None
        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
        )
        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight *= np.exp(estimator_weight * incorrect * (sample_weight > 0))
        return sample_weight, estimator_weight, estimator_error
    # TODO: remove when supporting scikit-learn>=1.4
    # Backward-compatibility alias: exposes the private `_estimator` under the
    # public `estimator_` name expected by older scikit-learn versions.
    @property
    def estimator_(self):
        """Estimator used to grow the ensemble."""
        return self._estimator
| 16,255 | 35.44843 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/__init__.py | """
The :mod:`imblearn.ensemble` module include methods generating
under-sampled subsets combined inside an ensemble.
"""
from ._bagging import BalancedBaggingClassifier
from ._easy_ensemble import EasyEnsembleClassifier
from ._forest import BalancedRandomForestClassifier
from ._weight_boosting import RUSBoostClassifier
__all__ = [
"BalancedBaggingClassifier",
"BalancedRandomForestClassifier",
"EasyEnsembleClassifier",
"RUSBoostClassifier",
]
| 465 | 26.411765 | 62 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/_common.py | from numbers import Integral, Real
from sklearn.tree._criterion import Criterion
from ..utils._param_validation import (
HasMethods,
Hidden,
Interval,
RealNotInt,
StrOptions,
)
def _estimator_has(attr):
    """Check if we can delegate a method to the underlying estimator.

    First, we check the first fitted estimator if available, otherwise we
    check the estimator attribute.
    """

    def check(self):
        # Prefer the fitted ensemble members when they exist.
        if hasattr(self, "estimators_"):
            return hasattr(self.estimators_[0], attr)
        # Fall back on the unfitted estimator parameter.
        if self.estimator is not None:
            return hasattr(self.estimator, attr)
        # TODO(1.4): Remove when the base_estimator deprecation cycle ends
        return hasattr(self.base_estimator, attr)

    return check
# Parameter constraints for the bagging-based ensembles, consumed by
# scikit-learn's `_param_validation` machinery (see imports above).
_bagging_parameter_constraints = {
    "estimator": [HasMethods(["fit", "predict"]), None],
    "n_estimators": [Interval(Integral, 1, None, closed="left")],
    # `max_samples`/`max_features`: an absolute count or a fraction in (0, 1].
    "max_samples": [
        Interval(Integral, 1, None, closed="left"),
        Interval(RealNotInt, 0, 1, closed="right"),
    ],
    "max_features": [
        Interval(Integral, 1, None, closed="left"),
        Interval(RealNotInt, 0, 1, closed="right"),
    ],
    "bootstrap": ["boolean"],
    "bootstrap_features": ["boolean"],
    "oob_score": ["boolean"],
    "warm_start": ["boolean"],
    "n_jobs": [None, Integral],
    "random_state": ["random_state"],
    "verbose": ["verbose"],
    # Deprecated alias of `estimator`; "deprecated" is the sentinel default.
    "base_estimator": [
        HasMethods(["fit", "predict"]),
        StrOptions({"deprecated"}),
        None,
    ],
}
# Parameter constraints for the AdaBoost-based ensembles (same validation
# machinery as above).
_adaboost_classifier_parameter_constraints = {
    "estimator": [HasMethods(["fit", "predict"]), None],
    "n_estimators": [Interval(Integral, 1, None, closed="left")],
    "learning_rate": [Interval(Real, 0, None, closed="neither")],
    "random_state": ["random_state"],
    # Deprecated alias of `estimator`; "deprecated" is the sentinel default.
    "base_estimator": [HasMethods(["fit", "predict"]), StrOptions({"deprecated"})],
    "algorithm": [StrOptions({"SAMME", "SAMME.R"})],
}
# Parameter constraints for the random-forest-based ensemble; mirrors the
# tree/forest parameters accepted by scikit-learn plus imblearn extras.
_random_forest_classifier_parameter_constraints = {
    "n_estimators": [Interval(Integral, 1, None, closed="left")],
    "bootstrap": ["boolean"],
    "oob_score": ["boolean"],
    "n_jobs": [Integral, None],
    "random_state": ["random_state"],
    "verbose": ["verbose"],
    "warm_start": ["boolean"],
    # A string criterion name or a pre-built (hidden) Criterion instance.
    "criterion": [StrOptions({"gini", "entropy", "log_loss"}), Hidden(Criterion)],
    "max_samples": [
        None,
        Interval(Real, 0.0, 1.0, closed="right"),
        Interval(Integral, 1, None, closed="left"),
    ],
    "max_depth": [Interval(Integral, 1, None, closed="left"), None],
    # Splitting thresholds accept absolute counts or fractions.
    "min_samples_split": [
        Interval(Integral, 2, None, closed="left"),
        Interval(RealNotInt, 0.0, 1.0, closed="right"),
    ],
    "min_samples_leaf": [
        Interval(Integral, 1, None, closed="left"),
        Interval(RealNotInt, 0.0, 1.0, closed="neither"),
    ],
    "min_weight_fraction_leaf": [Interval(Real, 0.0, 0.5, closed="both")],
    "max_features": [
        Interval(Integral, 1, None, closed="left"),
        Interval(RealNotInt, 0.0, 1.0, closed="right"),
        StrOptions({"sqrt", "log2"}),
        None,
    ],
    "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
    "min_impurity_decrease": [Interval(Real, 0.0, None, closed="left")],
    "ccp_alpha": [Interval(Real, 0.0, None, closed="left")],
    "class_weight": [
        StrOptions({"balanced_subsample", "balanced"}),
        dict,
        list,
        None,
    ],
}
| 3,455 | 31.914286 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/tests/test_bagging.py | """Test the module ensemble classifiers."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
from collections import Counter
import numpy as np
import pytest
import sklearn
from sklearn.cluster import KMeans
from sklearn.datasets import load_iris, make_classification, make_hastie_10_2
from sklearn.dummy import DummyClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import LogisticRegression, Perceptron
from sklearn.model_selection import GridSearchCV, ParameterGrid, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from sklearn.utils.fixes import parse_version
from imblearn import FunctionSampler
from imblearn.datasets import make_imbalance
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.over_sampling import SMOTE, RandomOverSampler
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import ClusterCentroids, RandomUnderSampler
# Parsed scikit-learn version, used by version-gated skips below.
sklearn_version = parse_version(sklearn.__version__)
# Shared iris dataset used as the base of most fixtures in this module.
iris = load_iris()
@pytest.mark.parametrize(
    "estimator",
    [
        None,
        DummyClassifier(strategy="prior"),
        Perceptron(max_iter=1000, tol=1e-3),
        DecisionTreeClassifier(),
        KNeighborsClassifier(),
        SVC(gamma="scale"),
    ],
)
@pytest.mark.parametrize(
    "params",
    ParameterGrid(
        {
            "max_samples": [0.5, 1.0],
            "max_features": [1, 2, 4],
            "bootstrap": [True, False],
            "bootstrap_features": [True, False],
        }
    ),
)
def test_balanced_bagging_classifier(estimator, params):
    """Smoke test fit/predict across base estimators and bagging options."""
    # Check classification for various parameter settings.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    bag = BalancedBaggingClassifier(estimator=estimator, random_state=0, **params).fit(
        X_train, y_train
    )
    bag.predict(X_test)
    bag.predict_proba(X_test)
    bag.score(X_test, y_test)
    # `decision_function` is only delegated when the base estimator has one.
    if hasattr(estimator, "decision_function"):
        bag.decision_function(X_test)
def test_bootstrap_samples():
    """Bootstrapping samples should yield imperfect base estimators."""
    # Test that bootstrapping samples generate non-perfect base estimators.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    estimator = DecisionTreeClassifier().fit(X_train, y_train)
    # without bootstrap, all trees are perfect on the training set
    # disable the resampling by passing an empty dictionary.
    ensemble = BalancedBaggingClassifier(
        estimator=DecisionTreeClassifier(),
        max_samples=1.0,
        bootstrap=False,
        n_estimators=10,
        sampling_strategy={},
        random_state=0,
    ).fit(X_train, y_train)
    assert ensemble.score(X_train, y_train) == estimator.score(X_train, y_train)
    # with bootstrap, trees are no longer perfect on the training set
    ensemble = BalancedBaggingClassifier(
        estimator=DecisionTreeClassifier(),
        max_samples=1.0,
        bootstrap=True,
        random_state=0,
    ).fit(X_train, y_train)
    assert ensemble.score(X_train, y_train) < estimator.score(X_train, y_train)
def test_bootstrap_features():
    """Feature bootstrapping should draw duplicated feature indices."""
    # Test that bootstrapping features may generate duplicate features.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    # Without feature bootstrap, each member sees every distinct feature.
    ensemble = BalancedBaggingClassifier(
        estimator=DecisionTreeClassifier(),
        max_features=1.0,
        bootstrap_features=False,
        random_state=0,
    ).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert np.unique(features).shape[0] == X.shape[1]
    # With feature bootstrap, duplicates reduce the distinct feature count.
    ensemble = BalancedBaggingClassifier(
        estimator=DecisionTreeClassifier(),
        max_features=1.0,
        bootstrap_features=True,
        random_state=0,
    ).fit(X_train, y_train)
    unique_features = [
        np.unique(features).shape[0] for features in ensemble.estimators_features_
    ]
    assert np.median(unique_features) < X.shape[1]
def test_probability():
    """predict_proba rows sum to 1 and agree with predict_log_proba."""
    # Predict probabilities.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case
        ensemble = BalancedBaggingClassifier(
            estimator=DecisionTreeClassifier(), random_state=0
        ).fit(X_train, y_train)
        assert_array_almost_equal(
            np.sum(ensemble.predict_proba(X_test), axis=1),
            np.ones(len(X_test)),
        )
        assert_array_almost_equal(
            ensemble.predict_proba(X_test),
            np.exp(ensemble.predict_log_proba(X_test)),
        )
        # Degenerate case, where some classes are missing
        # (max_samples=5 means a member may never see every class).
        ensemble = BalancedBaggingClassifier(
            estimator=LogisticRegression(solver="lbfgs", multi_class="auto"),
            random_state=0,
            max_samples=5,
        )
        ensemble.fit(X_train, y_train)
        assert_array_almost_equal(
            np.sum(ensemble.predict_proba(X_test), axis=1),
            np.ones(len(X_test)),
        )
        assert_array_almost_equal(
            ensemble.predict_proba(X_test),
            np.exp(ensemble.predict_log_proba(X_test)),
        )
def test_oob_score_classification():
    """OOB score approximates the held-out score; too few members warn."""
    # Check that oob prediction is a good estimation of the generalization
    # error.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    for estimator in [DecisionTreeClassifier(), SVC(gamma="scale")]:
        clf = BalancedBaggingClassifier(
            estimator=estimator,
            n_estimators=100,
            bootstrap=True,
            oob_score=True,
            random_state=0,
        ).fit(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        assert abs(test_score - clf.oob_score_) < 0.1
        # Test with few estimators
        with pytest.warns(UserWarning):
            BalancedBaggingClassifier(
                estimator=estimator,
                n_estimators=1,
                bootstrap=True,
                oob_score=True,
                random_state=0,
            ).fit(X_train, y_train)
def test_single_estimator():
    """A 1-member, non-bootstrap ensemble equals the bare sampler+estimator."""
    # Check singleton ensembles.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf1 = BalancedBaggingClassifier(
        estimator=KNeighborsClassifier(),
        n_estimators=1,
        bootstrap=False,
        bootstrap_features=False,
        random_state=0,
    ).fit(X_train, y_train)
    # Rebuild the equivalent pipeline reusing the fitted member's sampler seed.
    clf2 = make_pipeline(
        RandomUnderSampler(random_state=clf1.estimators_[0].steps[0][1].random_state),
        KNeighborsClassifier(),
    ).fit(X_train, y_train)
    assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_gridsearch():
    """A BalancedBaggingClassifier exposes nested params to grid search."""
    # Collapse iris into a binary problem so "roc_auc" scoring applies.
    X = iris.data
    y = iris.target.copy()
    y[y == 2] = 1
    param_grid = {"n_estimators": (1, 2), "estimator__C": (1, 2)}
    search = GridSearchCV(
        BalancedBaggingClassifier(SVC(gamma="scale")),
        param_grid,
        cv=3,
        scoring="roc_auc",
    )
    search.fit(X, y)
def test_estimator():
    """`estimator_` resolves to the given classifier (tree by default)."""
    # Check estimator and its default values.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    ensemble = BalancedBaggingClassifier(None, n_jobs=3, random_state=0).fit(
        X_train, y_train
    )
    assert isinstance(ensemble.estimator_.steps[-1][1], DecisionTreeClassifier)
    ensemble = BalancedBaggingClassifier(
        DecisionTreeClassifier(), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    assert isinstance(ensemble.estimator_.steps[-1][1], DecisionTreeClassifier)
    ensemble = BalancedBaggingClassifier(
        Perceptron(max_iter=1000, tol=1e-3), n_jobs=3, random_state=0
    ).fit(X_train, y_train)
    assert isinstance(ensemble.estimator_.steps[-1][1], Perceptron)
def test_bagging_with_pipeline():
    """Smoke test: a pipeline can serve as the bagged base estimator."""
    data, target = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    inner_pipeline = make_pipeline(SelectKBest(k=1), DecisionTreeClassifier())
    bagger = BalancedBaggingClassifier(inner_pipeline, max_features=2)
    bagger.fit(data, target)
    bagger.predict(data)
def test_warm_start(random_state=42):
    """Incremental warm-start fits grow the ensemble like a single fit."""
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = BalancedBaggingClassifier(
                n_estimators=n_estimators,
                random_state=random_state,
                warm_start=True,
            )
        else:
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert len(clf_ws) == n_estimators
    clf_no_ws = BalancedBaggingClassifier(
        n_estimators=10, random_state=random_state, warm_start=False
    )
    clf_no_ws.fit(X, y)
    # Both ensembles must have drawn the same member seeds overall.
    assert {pipe.steps[-1][1].random_state for pipe in clf_ws} == {
        pipe.steps[-1][1].random_state for pipe in clf_no_ws
    }
def test_warm_start_smaller_n_estimators():
    """Shrinking ``n_estimators`` between warm-started fits must raise."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    ensemble = BalancedBaggingClassifier(n_estimators=5, warm_start=True)
    ensemble.fit(X, y)
    # Requesting fewer members than already fitted is invalid.
    ensemble.set_params(n_estimators=4)
    with pytest.raises(ValueError):
        ensemble.fit(X, y)
def test_warm_start_equal_n_estimators():
    """Warm-start refit with unchanged n_estimators warns and is a no-op."""
    # Test that nothing happens when fitting without increasing n_estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # modify X to nonsense values, this should not change anything
    X_train += 1.0
    warn_msg = "Warm-start fitting without increasing n_estimators does not"
    with pytest.warns(UserWarning, match=warn_msg):
        clf.fit(X_train, y_train)
    assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
    """A 5+5 warm-started ensemble predicts like a single 10-member fit."""
    # warm started classifier with 5+5 estimators should be equivalent to
    # one classifier with 10 estimators
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    clf_ws = BalancedBaggingClassifier(
        n_estimators=5, warm_start=True, random_state=3141
    )
    clf_ws.fit(X_train, y_train)
    clf_ws.set_params(n_estimators=10)
    clf_ws.fit(X_train, y_train)
    y1 = clf_ws.predict(X_test)
    clf = BalancedBaggingClassifier(
        n_estimators=10, warm_start=False, random_state=3141
    )
    clf.fit(X_train, y_train)
    y2 = clf.predict(X_test)
    assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
    """Using ``oob_score`` together with ``warm_start`` is rejected."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    model = BalancedBaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
    with pytest.raises(ValueError):
        model.fit(X, y)
def test_oob_score_removed_on_warm_start():
    """Refitting with ``oob_score=False`` drops the ``oob_score_`` attribute."""
    X, y = make_hastie_10_2(n_samples=2000, random_state=1)
    bagger = BalancedBaggingClassifier(n_estimators=50, oob_score=True)
    bagger.fit(X, y)
    bagger.set_params(warm_start=True, oob_score=False, n_estimators=100)
    bagger.fit(X, y)
    with pytest.raises(AttributeError):
        bagger.oob_score_
def test_oob_score_consistency():
    """Two fits with identical seed and data yield identical OOB scores."""
    # Make sure OOB scores are identical when random_state, estimator, and
    # training data are fixed and fitting is done twice
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    bagging = BalancedBaggingClassifier(
        KNeighborsClassifier(),
        max_samples=0.5,
        max_features=0.5,
        oob_score=True,
        random_state=1,
    )
    assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_
def test_estimators_samples():
    """`estimators_samples_` fully reproduces each member's training data."""
    # Check that format of estimators_samples_ is correct and that results
    # generated at fit time can be identically reproduced at a later time
    # using data saved in object attributes.
    X, y = make_hastie_10_2(n_samples=200, random_state=1)
    # remap the y outside of the BalancedBaggingclassifier
    # _, y = np.unique(y, return_inverse=True)
    bagging = BalancedBaggingClassifier(
        LogisticRegression(solver="lbfgs", multi_class="auto"),
        max_samples=0.5,
        max_features=0.5,
        random_state=1,
        bootstrap=False,
    )
    bagging.fit(X, y)
    # Get relevant attributes
    estimators_samples = bagging.estimators_samples_
    estimators_features = bagging.estimators_features_
    estimators = bagging.estimators_
    # Test for correct formatting
    assert len(estimators_samples) == len(estimators)
    assert len(estimators_samples[0]) == len(X) // 2
    assert estimators_samples[0].dtype.kind == "i"
    # Re-fit single estimator to test for consistent sampling
    estimator_index = 0
    estimator_samples = estimators_samples[estimator_index]
    estimator_features = estimators_features[estimator_index]
    estimator = estimators[estimator_index]
    X_train = (X[estimator_samples])[:, estimator_features]
    y_train = y[estimator_samples]
    orig_coefs = estimator.steps[-1][1].coef_
    estimator.fit(X_train, y_train)
    new_coefs = estimator.steps[-1][1].coef_
    # Refitting on the reconstructed subset must give identical coefficients.
    assert_allclose(orig_coefs, new_coefs)
def test_max_samples_consistency():
    """An integer `max_samples` is kept as-is after validation."""
    # Make sure validated max_samples and original max_samples are identical
    # when valid integer max_samples supplied by user
    max_samples = 100
    X, y = make_hastie_10_2(n_samples=2 * max_samples, random_state=1)
    bagging = BalancedBaggingClassifier(
        KNeighborsClassifier(),
        max_samples=max_samples,
        max_features=0.5,
        random_state=1,
    )
    bagging.fit(X, y)
    assert bagging._max_samples == max_samples
class CountDecisionTreeClassifier(DecisionTreeClassifier):
    """DecisionTreeClassifier that will memorize the number of samples seen
    at fit."""

    def fit(self, X, y, sample_weight=None):
        # Record the class distribution actually seen by this estimator so
        # tests can assert on the resampling performed upstream.
        self.class_counts_ = Counter(y)
        return super().fit(X, y, sample_weight=sample_weight)
@pytest.mark.filterwarnings("ignore:Number of distinct clusters")
@pytest.mark.parametrize(
    "sampler, n_samples_bootstrap",
    [
        (None, 15),
        (RandomUnderSampler(), 15),  # under-sampling with sample_indices_
        (
            ClusterCentroids(estimator=KMeans(n_init=1)),
            15,
        ),  # under-sampling without sample_indices_
        (RandomOverSampler(), 40),  # over-sampling with sample_indices_
        (SMOTE(), 40),  # over-sampling without sample_indices_
    ],
)
def test_balanced_bagging_classifier_samplers(sampler, n_samples_bootstrap):
    """Any imblearn sampler can drive the per-member resampling."""
    # check that we can pass any kind of sampler to a bagging classifier
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = BalancedBaggingClassifier(
        estimator=CountDecisionTreeClassifier(),
        n_estimators=2,
        sampler=sampler,
        random_state=0,
    )
    clf.fit(X_train, y_train)
    clf.predict(X_test)
    # check that we have balanced class with the right counts of class
    # sample depending on the sampling strategy
    assert_array_equal(
        list(clf.estimators_[0][-1].class_counts_.values()), n_samples_bootstrap
    )
@pytest.mark.parametrize("replace", [True, False])
def test_balanced_bagging_classifier_with_function_sampler(replace):
    """A custom FunctionSampler (Roughly Balanced Bagging) is accepted."""
    # check that we can provide a FunctionSampler in BalancedBaggingClassifier
    X, y = make_classification(
        n_samples=1_000,
        n_features=10,
        n_classes=2,
        weights=[0.3, 0.7],
        random_state=0,
    )

    def roughly_balanced_bagging(X, y, replace=False):
        """Implementation of Roughly Balanced Bagging for binary problem."""
        # find the minority and majority classes
        class_counts = Counter(y)
        majority_class = max(class_counts, key=class_counts.get)
        minority_class = min(class_counts, key=class_counts.get)
        # compute the number of sample to draw from the majority class using
        # a negative binomial distribution
        n_minority_class = class_counts[minority_class]
        n_majority_resampled = np.random.negative_binomial(n=n_minority_class, p=0.5)
        # draw randomly with or without replacement
        majority_indices = np.random.choice(
            np.flatnonzero(y == majority_class),
            size=n_majority_resampled,
            replace=replace,
        )
        minority_indices = np.random.choice(
            np.flatnonzero(y == minority_class),
            size=n_minority_class,
            replace=replace,
        )
        indices = np.hstack([majority_indices, minority_indices])
        return X[indices], y[indices]

    # Roughly Balanced Bagging
    rbb = BalancedBaggingClassifier(
        estimator=CountDecisionTreeClassifier(random_state=0),
        n_estimators=2,
        sampler=FunctionSampler(
            func=roughly_balanced_bagging, kw_args={"replace": replace}
        ),
        random_state=0,
    )
    rbb.fit(X, y)
    # Each member should have seen roughly balanced class counts.
    for estimator in rbb.estimators_:
        class_counts = estimator[-1].class_counts_
        assert (class_counts[0] / class_counts[1]) > 0.78
def test_balanced_bagging_classifier_n_features():
    """Accessing the deprecated ``n_features_`` attribute warns."""
    data, target = load_iris(return_X_y=True)
    model = BalancedBaggingClassifier().fit(data, target)
    with pytest.warns(FutureWarning, match="`n_features_` was deprecated"):
        model.n_features_
@pytest.mark.skipif(
    sklearn_version < parse_version("1.2"), reason="requires scikit-learn>=1.2"
)
def test_balanced_bagging_classifier_base_estimator():
    """Check that we raise a FutureWarning when accessing `base_estimator_`."""
    X, y = load_iris(return_X_y=True)
    estimator = BalancedBaggingClassifier().fit(X, y)
    with pytest.warns(FutureWarning, match="`base_estimator_` was deprecated"):
        estimator.base_estimator_
def test_balanced_bagging_classifier_set_both_estimator_and_base_estimator():
    """Check that we raise a ValueError when setting both `estimator` and
    `base_estimator`."""
    X, y = load_iris(return_X_y=True)
    err_msg = "Both `estimator` and `base_estimator` were set. Only set `estimator`."
    with pytest.raises(ValueError, match=err_msg):
        BalancedBaggingClassifier(
            estimator=KNeighborsClassifier(), base_estimator=KNeighborsClassifier()
        ).fit(X, y)
| 20,190 | 31.724473 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/tests/test_forest.py | import numpy as np
import pytest
import sklearn
from sklearn.datasets import load_iris, make_classification
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import parse_version
from imblearn.ensemble import BalancedRandomForestClassifier
# Parsed scikit-learn version, used by version-gated skips below.
sklearn_version = parse_version(sklearn.__version__)
@pytest.fixture
def imbalanced_dataset():
    """Three-class dataset with a strong (1%/5%/94%) class imbalance."""
    return make_classification(
        n_samples=10000,
        n_features=2,
        n_informative=2,
        n_redundant=0,
        n_repeated=0,
        n_classes=3,
        n_clusters_per_class=1,
        weights=[0.01, 0.05, 0.94],
        class_sep=0.8,
        random_state=0,
    )
def test_balanced_random_forest_error_warning_warm_start(imbalanced_dataset):
    """Shrinking n_estimators errors; refitting with same size warns."""
    brf = BalancedRandomForestClassifier(
        n_estimators=5, sampling_strategy="all", replacement=True, bootstrap=False
    )
    brf.fit(*imbalanced_dataset)
    with pytest.raises(ValueError, match="must be larger or equal to"):
        brf.set_params(warm_start=True, n_estimators=2)
        brf.fit(*imbalanced_dataset)
    brf.set_params(n_estimators=10)
    brf.fit(*imbalanced_dataset)
    with pytest.warns(UserWarning, match="Warm-start fitting without"):
        brf.fit(*imbalanced_dataset)
def test_balanced_random_forest(imbalanced_dataset):
    """Fitted forest exposes one sampler/estimator/pipeline per member."""
    n_estimators = 10
    brf = BalancedRandomForestClassifier(
        n_estimators=n_estimators,
        random_state=0,
        sampling_strategy="all",
        replacement=True,
        bootstrap=False,
    )
    brf.fit(*imbalanced_dataset)
    assert len(brf.samplers_) == n_estimators
    assert len(brf.estimators_) == n_estimators
    assert len(brf.pipelines_) == n_estimators
    assert len(brf.feature_importances_) == imbalanced_dataset[0].shape[1]
def test_balanced_random_forest_attributes(imbalanced_dataset):
    """`samplers_`/`estimators_` agree member-by-member with `pipelines_`."""
    X, y = imbalanced_dataset
    n_estimators = 10
    brf = BalancedRandomForestClassifier(
        n_estimators=n_estimators,
        random_state=0,
        sampling_strategy="all",
        replacement=True,
        bootstrap=False,
    )
    brf.fit(X, y)
    for idx in range(n_estimators):
        X_res, y_res = brf.samplers_[idx].fit_resample(X, y)
        X_res_2, y_res_2 = (
            brf.pipelines_[idx].named_steps["randomundersampler"].fit_resample(X, y)
        )
        assert_allclose(X_res, X_res_2)
        assert_array_equal(y_res, y_res_2)
        y_pred = brf.estimators_[idx].fit(X_res, y_res).predict(X)
        y_pred_2 = brf.pipelines_[idx].fit(X, y).predict(X)
        assert_array_equal(y_pred, y_pred_2)
        y_pred = brf.estimators_[idx].fit(X_res, y_res).predict_proba(X)
        y_pred_2 = brf.pipelines_[idx].fit(X, y).predict_proba(X)
        assert_array_equal(y_pred, y_pred_2)
def test_balanced_random_forest_sample_weight(imbalanced_dataset):
    """Smoke test: fitting with per-sample weights succeeds."""
    X, y = imbalanced_dataset
    weights = np.random.RandomState(42).rand(y.shape[0])
    forest = BalancedRandomForestClassifier(
        n_estimators=5,
        random_state=0,
        sampling_strategy="all",
        replacement=True,
        bootstrap=False,
    )
    forest.fit(X, y, weights)
@pytest.mark.filterwarnings("ignore:Some inputs do not have OOB scores")
def test_balanced_random_forest_oob(imbalanced_dataset):
    """OOB score approximates the held-out score; too few trees must warn.

    Bug fix: the original used ``with pytest.warns(UserWarning) and
    np.errstate(...)``. The ``and`` expression evaluates to its second
    operand only, so the ``pytest.warns`` context manager was never entered
    and the expected warning was never actually asserted. Both context
    managers must be entered, comma-separated.
    """
    X, y = imbalanced_dataset
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=42, stratify=y
    )
    est = BalancedRandomForestClassifier(
        oob_score=True,
        random_state=0,
        n_estimators=1000,
        min_samples_leaf=2,
        sampling_strategy="all",
        replacement=True,
        bootstrap=True,
    )
    est.fit(X_train, y_train)
    test_score = est.score(X_test, y_test)
    # The OOB estimate should be close to the generalization score.
    assert abs(test_score - est.oob_score_) < 0.1
    # Check warning if not enough estimators
    est = BalancedRandomForestClassifier(
        oob_score=True,
        random_state=0,
        n_estimators=1,
        bootstrap=True,
        sampling_strategy="all",
        replacement=True,
    )
    with pytest.warns(UserWarning), np.errstate(divide="ignore", invalid="ignore"):
        est.fit(X, y)
def test_balanced_random_forest_grid_search(imbalanced_dataset):
    """The forest exposes its parameters and can be grid-searched."""
    forest = BalancedRandomForestClassifier(
        sampling_strategy="all", replacement=True, bootstrap=False
    )
    param_grid = {"n_estimators": (1, 2), "max_depth": (1, 2)}
    GridSearchCV(forest, param_grid, cv=3).fit(*imbalanced_dataset)
def test_little_tree_with_small_max_samples():
    """Restricting `max_samples` must produce a smaller tree."""
    rng = np.random.RandomState(1)
    X = rng.randn(10000, 2)
    y = rng.randn(10000) > 0
    # First fit with no restriction on max samples
    est1 = BalancedRandomForestClassifier(
        n_estimators=1,
        random_state=rng,
        max_samples=None,
        sampling_strategy="all",
        replacement=True,
        bootstrap=True,
    )
    # Second fit with max samples restricted to just 2
    est2 = BalancedRandomForestClassifier(
        n_estimators=1,
        random_state=rng,
        max_samples=2,
        sampling_strategy="all",
        replacement=True,
        bootstrap=True,
    )
    est1.fit(X, y)
    est2.fit(X, y)
    tree1 = est1.estimators_[0].tree_
    tree2 = est2.estimators_[0].tree_
    msg = "Tree without `max_samples` restriction should have more nodes"
    assert tree1.node_count > tree2.node_count, msg
def test_balanced_random_forest_pruning(imbalanced_dataset):
    """Cost-complexity pruning (`ccp_alpha`) must reduce node count."""
    brf = BalancedRandomForestClassifier(
        sampling_strategy="all", replacement=True, bootstrap=False
    )
    brf.fit(*imbalanced_dataset)
    n_nodes_no_pruning = brf.estimators_[0].tree_.node_count
    brf_pruned = BalancedRandomForestClassifier(
        ccp_alpha=0.015, sampling_strategy="all", replacement=True, bootstrap=False
    )
    brf_pruned.fit(*imbalanced_dataset)
    n_nodes_pruning = brf_pruned.estimators_[0].tree_.node_count
    assert n_nodes_no_pruning > n_nodes_pruning
@pytest.mark.parametrize("ratio", [0.5, 0.1])
@pytest.mark.filterwarnings("ignore:Some inputs do not have OOB scores")
def test_balanced_random_forest_oob_binomial(ratio):
    # Regression test for #655: check that the oob score is closed to 0.5
    # a binomial experiment.
    rng = np.random.RandomState(42)
    n_samples = 1000
    X = np.arange(n_samples).reshape(-1, 1)
    y = rng.binomial(1, ratio, size=n_samples)
    erf = BalancedRandomForestClassifier(
        oob_score=True,
        random_state=42,
        sampling_strategy="not minority",
        replacement=False,
        bootstrap=True,
    )
    erf.fit(X, y)
    # Pure-noise labels: OOB accuracy should hover around chance (0.5).
    assert np.abs(erf.oob_score_ - 0.5) < 0.1
# NOTE(review): despite its name (copied from the bagging test module), this
# test exercises BalancedRandomForestClassifier.
def test_balanced_bagging_classifier_n_features():
    """Check that we raise a FutureWarning when accessing `n_features_`."""
    X, y = load_iris(return_X_y=True)
    estimator = BalancedRandomForestClassifier(
        sampling_strategy="all", replacement=True, bootstrap=False
    ).fit(X, y)
    with pytest.warns(FutureWarning, match="`n_features_` was deprecated"):
        estimator.n_features_
@pytest.mark.skipif(
    sklearn_version < parse_version("1.2"), reason="requires scikit-learn>=1.2"
)
def test_balanced_random_forest_classifier_base_estimator():
    """Check that we raise a FutureWarning when accessing `base_estimator_`."""
    X, y = load_iris(return_X_y=True)
    estimator = BalancedRandomForestClassifier(
        sampling_strategy="all", replacement=True, bootstrap=False
    ).fit(X, y)
    with pytest.warns(FutureWarning, match="`base_estimator_` was deprecated"):
        estimator.base_estimator_
# TODO: remove in 0.13
def test_balanced_random_forest_change_behaviour(imbalanced_dataset):
    """Check that we raise a change of behaviour for the parameters `sampling_strategy`
    and `replacement`.
    """
    # Each fit omits exactly one of the three parameters whose default is
    # changing, and must warn about that specific parameter.
    estimator = BalancedRandomForestClassifier(sampling_strategy="all", bootstrap=False)
    with pytest.warns(FutureWarning, match="The default of `replacement`"):
        estimator.fit(*imbalanced_dataset)
    estimator = BalancedRandomForestClassifier(replacement=True, bootstrap=False)
    with pytest.warns(FutureWarning, match="The default of `sampling_strategy`"):
        estimator.fit(*imbalanced_dataset)
    estimator = BalancedRandomForestClassifier(
        sampling_strategy="all", replacement=True
    )
    with pytest.warns(FutureWarning, match="The default of `bootstrap`"):
        estimator.fit(*imbalanced_dataset)
| 8,447 | 31.367816 | 88 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/tests/test_easy_ensemble.py | """Test the module easy ensemble."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import numpy as np
import pytest
import sklearn
from sklearn.datasets import load_iris, make_hastie_10_2
from sklearn.ensemble import AdaBoostClassifier
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.utils._testing import assert_allclose, assert_array_equal
from sklearn.utils.fixes import parse_version
from imblearn.datasets import make_imbalance
from imblearn.ensemble import EasyEnsembleClassifier
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import RandomUnderSampler
# Parsed scikit-learn version, used by version-gated skips below.
sklearn_version = parse_version(sklearn.__version__)
# Shared iris dataset used as the base of most fixtures in this module.
iris = load_iris()
# Generate a global dataset to use
RND_SEED = 0
# Small hand-written 2D dataset with classes 0, 1 and 2.
X = np.array(
    [
        [0.5220963, 0.11349303],
        [0.59091459, 0.40692742],
        [1.10915364, 0.05718352],
        [0.22039505, 0.26469445],
        [1.35269503, 0.44812421],
        [0.85117925, 1.0185556],
        [-2.10724436, 0.70263997],
        [-0.23627356, 0.30254174],
        [-1.23195149, 0.15427291],
        [-0.58539673, 0.62515052],
    ]
)
Y = np.array([1, 2, 2, 2, 1, 0, 1, 1, 1, 0])
@pytest.mark.parametrize("n_estimators", [10, 20])
@pytest.mark.parametrize(
    "estimator",
    [AdaBoostClassifier(n_estimators=5), AdaBoostClassifier(n_estimators=10)],
)
def test_easy_ensemble_classifier(n_estimators, estimator):
    """Smoke test fit, scoring and all prediction methods."""
    # Check classification for various parameter settings.
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    eec = EasyEnsembleClassifier(
        n_estimators=n_estimators,
        estimator=estimator,
        n_jobs=-1,
        random_state=RND_SEED,
    )
    eec.fit(X_train, y_train).score(X_test, y_test)
    assert len(eec.estimators_) == n_estimators
    # Each member's inner AdaBoost must keep its requested size.
    for est in eec.estimators_:
        assert len(est.named_steps["classifier"]) == estimator.n_estimators
    # test the different prediction function
    eec.predict(X_test)
    eec.predict_proba(X_test)
    eec.predict_log_proba(X_test)
    eec.decision_function(X_test)
def test_estimator():
    """Check that a default (None) and an explicit estimator are both accepted
    and end up as the final AdaBoost step of the inner pipeline."""
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, _, y_train, _ = train_test_split(X, y, random_state=0)
    # ``estimator=None`` must fall back to AdaBoost; an explicit AdaBoost is kept.
    for base in (None, AdaBoostClassifier()):
        ensemble = EasyEnsembleClassifier(2, base, n_jobs=-1, random_state=0).fit(
            X_train, y_train
        )
        final_step = ensemble.estimator_.steps[-1][1]
        assert isinstance(final_step, AdaBoostClassifier)
def test_bagging_with_pipeline():
    """EasyEnsembleClassifier must accept a full pipeline as inner estimator."""
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    inner = make_pipeline(SelectKBest(k=1), AdaBoostClassifier())
    model = EasyEnsembleClassifier(n_estimators=2, estimator=inner)
    # Fit and predict must run end-to-end without raising.
    model.fit(X, y).predict(X)
def test_warm_start(random_state=42):
    # Test if fitting incrementally with warm start gives a forest of the
    # right size and the same results as a normal fit.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf_ws = None
    # Grow the ensemble in two steps: 5 estimators, then 10.
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = EasyEnsembleClassifier(
                n_estimators=n_estimators,
                random_state=random_state,
                warm_start=True,
            )
        else:
            # Subsequent fits only add the missing estimators.
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert len(clf_ws) == n_estimators
    # Reference: the same ensemble built in a single cold fit.
    clf_no_ws = EasyEnsembleClassifier(
        n_estimators=10, random_state=random_state, warm_start=False
    )
    clf_no_ws.fit(X, y)
    # Both ensembles must draw the same set of per-estimator random states.
    assert {pipe.steps[-1][1].random_state for pipe in clf_ws} == {
        pipe.steps[-1][1].random_state for pipe in clf_no_ws
    }
def test_warm_start_smaller_n_estimators():
    """A warm-started refit with fewer estimators must raise a ValueError."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    model = EasyEnsembleClassifier(n_estimators=5, warm_start=True).fit(X, y)
    # Shrinking an already fitted warm-started ensemble is not supported.
    model.set_params(n_estimators=4)
    with pytest.raises(ValueError):
        model.fit(X, y)
def test_warm_start_equal_n_estimators():
    """Refitting with the same ``n_estimators`` warns and leaves the model as-is."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    model = EasyEnsembleClassifier(n_estimators=5, warm_start=True, random_state=83)
    expected = model.fit(X_train, y_train).predict(X_test)
    # Corrupt the training data: the second fit must be a no-op anyway.
    X_train += 1.0
    with pytest.warns(
        UserWarning, match="Warm-start fitting without increasing n_estimators"
    ):
        model.fit(X_train, y_train)
    assert_array_equal(expected, model.predict(X_test))
def test_warm_start_equivalence():
    """Warm-started 5 + 5 estimators must match a single fit with 10."""
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
    # Grow the ensemble in two steps of five estimators each.
    incremental = EasyEnsembleClassifier(
        n_estimators=5, warm_start=True, random_state=3141
    )
    incremental.fit(X_train, y_train)
    incremental.set_params(n_estimators=10)
    incremental.fit(X_train, y_train)
    # Reference: the same-sized ensemble built in one cold fit.
    reference = EasyEnsembleClassifier(
        n_estimators=10, warm_start=False, random_state=3141
    )
    reference.fit(X_train, y_train)
    assert_allclose(incremental.predict(X_test), reference.predict(X_test))
def test_easy_ensemble_classifier_single_estimator():
    """A single-estimator ensemble equals one undersample+AdaBoost pipeline."""
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    ensemble = EasyEnsembleClassifier(n_estimators=1, random_state=0)
    pipeline = make_pipeline(
        RandomUnderSampler(random_state=0), AdaBoostClassifier(random_state=0)
    )
    ensemble.fit(X_train, y_train)
    pipeline.fit(X_train, y_train)
    assert_array_equal(ensemble.predict(X_test), pipeline.predict(X_test))
def test_easy_ensemble_classifier_grid_search():
    """Grid-searching over ensemble size and inner AdaBoost size must work."""
    X, y = make_imbalance(
        iris.data,
        iris.target,
        sampling_strategy={0: 20, 1: 25, 2: 50},
        random_state=0,
    )
    # Tune both the number of bags and the size of the inner AdaBoost.
    param_grid = {"n_estimators": [1, 2], "estimator__n_estimators": [3, 4]}
    search = GridSearchCV(
        EasyEnsembleClassifier(estimator=AdaBoostClassifier()), param_grid, cv=5
    )
    search.fit(X, y)
def test_easy_ensemble_classifier_n_features():
    """Accessing the deprecated ``n_features_`` raises a FutureWarning."""
    X, y = load_iris(return_X_y=True)
    model = EasyEnsembleClassifier().fit(X, y)
    with pytest.warns(FutureWarning, match="`n_features_` was deprecated"):
        model.n_features_
@pytest.mark.skipif(
    sklearn_version < parse_version("1.2"), reason="warns for scikit-learn>=1.2"
)
def test_easy_ensemble_classifier_base_estimator():
    """Accessing the deprecated ``base_estimator_`` raises a FutureWarning."""
    X, y = load_iris(return_X_y=True)
    model = EasyEnsembleClassifier().fit(X, y)
    with pytest.warns(FutureWarning, match="`base_estimator_` was deprecated"):
        model.base_estimator_
def test_easy_ensemble_classifier_set_both_estimator_and_base_estimator():
    """Setting both ``estimator`` and ``base_estimator`` raises a ValueError."""
    X, y = load_iris(return_X_y=True)
    expected_msg = (
        "Both `estimator` and `base_estimator` were set. Only set `estimator`."
    )
    clf = EasyEnsembleClassifier(
        estimator=AdaBoostClassifier(), base_estimator=AdaBoostClassifier()
    )
    with pytest.raises(ValueError, match=expected_msg):
        clf.fit(X, y)
| 8,276 | 31.845238 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/imblearn/ensemble/tests/test_weight_boosting.py | import numpy as np
import pytest
import sklearn
from sklearn.datasets import load_iris, make_classification
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils._testing import assert_array_equal
from sklearn.utils.fixes import parse_version
from imblearn.ensemble import RUSBoostClassifier
sklearn_version = parse_version(sklearn.__version__)
@pytest.fixture
def imbalanced_dataset():
    """Return a 3-class dataset whose class weights are strongly imbalanced."""
    return make_classification(
        n_samples=10000,
        n_features=3,
        n_informative=2,
        n_redundant=0,
        n_repeated=0,
        n_classes=3,
        # ~1% / 5% / 94% class proportions: the imbalance under test.
        weights=[0.01, 0.05, 0.94],
        class_sep=0.8,
        random_state=0,
    )
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_rusboost(imbalanced_dataset, algorithm):
    # End-to-end check of RUSBoostClassifier: fitted attributes, prediction
    # shapes, and a minimum accuracy on a held-out split.
    X, y = imbalanced_dataset
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, stratify=y, random_state=1
    )
    classes = np.unique(y)
    n_estimators = 500
    rusboost = RUSBoostClassifier(
        n_estimators=n_estimators, algorithm=algorithm, random_state=0
    )
    rusboost.fit(X_train, y_train)
    assert_array_equal(classes, rusboost.classes_)
    # check that we have an ensemble of samplers and estimators with a
    # consistent size
    assert len(rusboost.estimators_) > 1
    assert len(rusboost.estimators_) == len(rusboost.samplers_)
    assert len(rusboost.pipelines_) == len(rusboost.samplers_)
    # each sampler in the ensemble should have different random state
    assert len({sampler.random_state for sampler in rusboost.samplers_}) == len(
        rusboost.samplers_
    )
    # each estimator in the ensemble should have different random state
    assert len({est.random_state for est in rusboost.estimators_}) == len(
        rusboost.estimators_
    )
    # check the consistency of the feature importances
    assert len(rusboost.feature_importances_) == imbalanced_dataset[0].shape[1]
    # check the consistency of the prediction outputs
    y_pred = rusboost.predict_proba(X_test)
    assert y_pred.shape[1] == len(classes)
    assert rusboost.decision_function(X_test).shape[1] == len(classes)
    score = rusboost.score(X_test, y_test)
    assert score > 0.6, f"Failed with algorithm {algorithm} and score {score}"
    y_pred = rusboost.predict(X_test)
    assert y_pred.shape == y_test.shape
@pytest.mark.parametrize("algorithm", ["SAMME", "SAMME.R"])
def test_rusboost_sample_weight(imbalanced_dataset, algorithm):
    """Uniform sample weights are a no-op; non-uniform weights change output."""
    X, y = imbalanced_dataset
    model = RUSBoostClassifier(algorithm=algorithm, random_state=0)
    # A weight vector of ones must be equivalent to passing no weights.
    pred_uniform = model.fit(X, y, np.ones_like(y)).predict(X)
    pred_unweighted = model.fit(X, y).predict(X)
    assert_array_equal(pred_uniform, pred_unweighted)
    # Random weights should lead to different predictions.
    random_weights = np.random.RandomState(42).rand(y.shape[0])
    pred_weighted = model.fit(X, y, random_weights).predict(X)
    with pytest.raises(AssertionError):
        assert_array_equal(pred_unweighted, pred_weighted)
@pytest.mark.skipif(
    sklearn_version < parse_version("1.2"), reason="requires scikit-learn>=1.2"
)
def test_rus_boost_classifier_base_estimator():
    """Accessing the deprecated ``base_estimator_`` raises a FutureWarning."""
    X, y = load_iris(return_X_y=True)
    model = RUSBoostClassifier().fit(X, y)
    with pytest.warns(FutureWarning, match="`base_estimator_` was deprecated"):
        model.base_estimator_
def test_rus_boost_classifier_set_both_estimator_and_base_estimator():
    """Setting both ``estimator`` and ``base_estimator`` raises a ValueError."""
    X, y = load_iris(return_X_y=True)
    expected_msg = (
        "Both `estimator` and `base_estimator` were set. Only set `estimator`."
    )
    clf = RUSBoostClassifier(
        estimator=DecisionTreeClassifier(), base_estimator=DecisionTreeClassifier()
    )
    with pytest.raises(ValueError, match=expected_msg):
        clf.fit(X, y)
| 4,138 | 34.681034 | 87 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tensorflow/_generator.py | """Implement generators for ``tensorflow`` which will balance the data."""
from scipy.sparse import issparse
from sklearn.base import clone
from sklearn.utils import _safe_indexing, check_random_state
from ..under_sampling import RandomUnderSampler
from ..utils import Substitution
from ..utils._docstring import _random_state_docstring
@Substitution(random_state=_random_state_docstring)
def balanced_batch_generator(
    X,
    y,
    *,
    sample_weight=None,
    sampler=None,
    batch_size=32,
    keep_sparse=False,
    random_state=None,
):
    """Create a balanced batch generator to train tensorflow model.
    Returns a generator --- as well as the number of step per epoch --- to
    iterate to get the mini-batches. The sampler defines the sampling strategy
    used to balance the dataset ahead of creating the batch. The sampler should
    have an attribute ``sample_indices_``.
    .. versionadded:: 0.4
    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Original imbalanced dataset.
    y : ndarray of shape (n_samples,) or (n_samples, n_classes)
        Associated targets.
    sample_weight : ndarray of shape (n_samples,), default=None
        Sample weight.
    sampler : sampler object, default=None
        A sampler instance which has an attribute ``sample_indices_``.
        By default, the sampler used is a
        :class:`~imblearn.under_sampling.RandomUnderSampler`.
    batch_size : int, default=32
        Number of samples per gradient update.
    keep_sparse : bool, default=False
        Either or not to conserve or not the sparsity of the input ``X``. By
        default, the returned batches will be dense.
    {random_state}
    Returns
    -------
    generator : generator of tuple
        Generate batch of data. The tuple generated are either (X_batch,
        y_batch) or (X_batch, y_batch, sampler_weight_batch).
    steps_per_epoch : int
        The number of samples per epoch.
    """
    random_state = check_random_state(random_state)
    if sampler is None:
        sampler_ = RandomUnderSampler(random_state=random_state)
    else:
        # Clone so the caller's sampler instance is left untouched.
        sampler_ = clone(sampler)
    # Fit only to obtain the selected indices; the resampled arrays are unused.
    sampler_.fit_resample(X, y)
    if not hasattr(sampler_, "sample_indices_"):
        raise ValueError("'sampler' needs to have an attribute 'sample_indices_'.")
    indices = sampler_.sample_indices_
    # shuffle the indices since the sampler are packing them by class
    random_state.shuffle(indices)
    def generator(X, y, sample_weight, indices, batch_size):
        # Infinite generator: cycles over the shuffled balanced indices so it
        # can be consumed epoch after epoch by the training loop.
        while True:
            for index in range(0, len(indices), batch_size):
                X_res = _safe_indexing(X, indices[index : index + batch_size])
                y_res = _safe_indexing(y, indices[index : index + batch_size])
                # Densify sparse batches unless the caller asked to keep them.
                if issparse(X_res) and not keep_sparse:
                    X_res = X_res.toarray()
                if sample_weight is None:
                    yield X_res, y_res
                else:
                    sw_res = _safe_indexing(
                        sample_weight, indices[index : index + batch_size]
                    )
                    yield X_res, y_res, sw_res
    return (
        generator(X, y, sample_weight, indices, batch_size),
        # Trailing partial batches are excluded from the step count.
        int(indices.size // batch_size),
    )
| 3,294 | 32.622449 | 83 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tensorflow/__init__.py | """The :mod:`imblearn.tensorflow` provides utilities to deal with imbalanced
dataset in tensorflow."""
from ._generator import balanced_batch_generator
__all__ = ["balanced_batch_generator"]
| 193 | 26.714286 | 76 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tensorflow/tests/test_generator.py | from distutils.version import LooseVersion
import numpy as np
import pytest
from scipy import sparse
from sklearn.datasets import load_iris
from imblearn.datasets import make_imbalance
from imblearn.over_sampling import RandomOverSampler
from imblearn.tensorflow import balanced_batch_generator
from imblearn.under_sampling import NearMiss
tf = pytest.importorskip("tensorflow")
@pytest.fixture
def data():
    """Return an imbalanced, float32 version of the iris dataset."""
    features, labels = load_iris(return_X_y=True)
    features, labels = make_imbalance(
        features, labels, sampling_strategy={0: 30, 1: 50, 2: 40}
    )
    # The TF models in this module consume float32 inputs.
    return features.astype(np.float32), labels
def check_balanced_batch_generator_tf_1_X_X(dataset, sampler):
    # Train a small softmax model with the TF 1.x graph API, consuming batches
    # from `balanced_batch_generator`; the check is that training runs cleanly.
    X, y = dataset
    batch_size = 10
    training_generator, steps_per_epoch = balanced_batch_generator(
        X,
        y,
        sample_weight=None,
        sampler=sampler,
        batch_size=batch_size,
        random_state=42,
    )
    learning_rate = 0.01
    epochs = 10
    input_size = X.shape[1]
    output_size = 3
    # helper functions
    def init_weights(shape):
        return tf.Variable(tf.random_normal(shape, stddev=0.01))
    def accuracy(y_true, y_pred):
        return np.mean(np.argmax(y_pred, axis=1) == y_true)
    # input and output
    data = tf.placeholder("float32", shape=[None, input_size])
    targets = tf.placeholder("int32", shape=[None])
    # build the model and weights
    W = init_weights([input_size, output_size])
    b = init_weights([output_size])
    out_act = tf.nn.sigmoid(tf.matmul(data, W) + b)
    # build the loss, predict, and train operator
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=out_act, labels=targets
    )
    loss = tf.reduce_sum(cross_entropy)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)
    predict = tf.nn.softmax(out_act)
    # Initialization of all variables in the graph
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        for e in range(epochs):
            # One epoch = `steps_per_epoch` batches from the generator.
            for i in range(steps_per_epoch):
                X_batch, y_batch = next(training_generator)
                sess.run(
                    [train_op, loss],
                    feed_dict={data: X_batch, targets: y_batch},
                )
            # For each epoch, run accuracy on train and test
            predicts_train = sess.run(predict, feed_dict={data: X})
            print(f"epoch: {e} train accuracy: {accuracy(y, predicts_train):.3f}")
def check_balanced_batch_generator_tf_2_X_X_compat_1_X_X(dataset, sampler):
    # Same training loop as the TF 1.x check, but going through the
    # ``tf.compat.v1`` graph-mode shims available in TF 2.x.
    tf.compat.v1.disable_eager_execution()
    X, y = dataset
    batch_size = 10
    training_generator, steps_per_epoch = balanced_batch_generator(
        X,
        y,
        sample_weight=None,
        sampler=sampler,
        batch_size=batch_size,
        random_state=42,
    )
    learning_rate = 0.01
    epochs = 10
    input_size = X.shape[1]
    output_size = 3
    # helper functions
    def init_weights(shape):
        return tf.Variable(tf.random.normal(shape, stddev=0.01))
    def accuracy(y_true, y_pred):
        return np.mean(np.argmax(y_pred, axis=1) == y_true)
    # input and output
    data = tf.compat.v1.placeholder("float32", shape=[None, input_size])
    targets = tf.compat.v1.placeholder("int32", shape=[None])
    # build the model and weights
    W = init_weights([input_size, output_size])
    b = init_weights([output_size])
    out_act = tf.nn.sigmoid(tf.matmul(data, W) + b)
    # build the loss, predict, and train operator
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=out_act, labels=targets
    )
    loss = tf.reduce_sum(input_tensor=cross_entropy)
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)
    predict = tf.nn.softmax(out_act)
    # Initialization of all variables in the graph
    init = tf.compat.v1.global_variables_initializer()
    with tf.compat.v1.Session() as sess:
        sess.run(init)
        for e in range(epochs):
            # One epoch = `steps_per_epoch` batches from the generator.
            for i in range(steps_per_epoch):
                X_batch, y_batch = next(training_generator)
                sess.run(
                    [train_op, loss],
                    feed_dict={data: X_batch, targets: y_batch},
                )
            # For each epoch, run accuracy on train and test
            predicts_train = sess.run(predict, feed_dict={data: X})
            print(f"epoch: {e} train accuracy: {accuracy(y, predicts_train):.3f}")
@pytest.mark.parametrize("sampler", [None, NearMiss(), RandomOverSampler()])
def test_balanced_batch_generator(data, sampler):
    """Dispatch to the TF 1.x or TF 2.x check depending on the installed TF.

    ``distutils.version.LooseVersion`` is deprecated (PEP 632) and ``distutils``
    is removed from the standard library in Python 3.12; comparing the major
    version number is sufficient for this 1.x vs 2.x dispatch.
    """
    tf_major_version = int(tf.__version__.split(".", 1)[0])
    if tf_major_version < 2:
        check_balanced_batch_generator_tf_1_X_X(data, sampler)
    else:
        check_balanced_batch_generator_tf_2_X_X_compat_1_X_X(data, sampler)
@pytest.mark.parametrize("keep_sparse", [True, False])
def test_balanced_batch_generator_function_sparse(data, keep_sparse):
    """Sparse input batches stay sparse iff ``keep_sparse`` is requested."""
    X, y = data
    batch_iterator, n_steps = balanced_batch_generator(
        sparse.csr_matrix(X),
        y,
        keep_sparse=keep_sparse,
        batch_size=10,
        random_state=42,
    )
    # Every batch of one full epoch must honour the requested sparsity.
    for _ in range(n_steps):
        X_batch, _ = next(batch_iterator)
        assert sparse.issparse(X_batch) == keep_sparse
| 5,406 | 30.254335 | 82 | py |
imbalanced-learn | imbalanced-learn-master/imblearn/tensorflow/tests/__init__.py | 0 | 0 | 0 | py | |
imbalanced-learn | imbalanced-learn-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# imbalanced-learn documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from datetime import datetime
from io import StringIO
from pathlib import Path
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve # noqa
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
"sphinxcontrib.bibtex",
"numpydoc",
"sphinx_issues",
"sphinx_gallery.gen_gallery",
"sphinx_copybutton",
]
# Specify how to identify the prompt when copying code snippets
copybutton_prompt_text = r">>> |\.\.\. "
copybutton_prompt_is_regexp = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "imbalanced-learn"
copyright = f"2014-{datetime.now().year}, The imbalanced-learn developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
from imblearn import __version__ # noqa
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "_templates"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "literal"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
html_title = f"Version {version}"
html_favicon = "_static/img/favicon.ico"
html_logo = "_static/img/logo_wide.png"
html_style = "css/imbalanced-learn.css"
html_css_files = [
"css/imbalanced-learn.css",
]
html_sidebars = {
"changelog": [],
}
html_theme_options = {
"external_links": [],
"github_url": "https://github.com/scikit-learn-contrib/imbalanced-learn",
# "twitter_url": "https://twitter.com/pandas_dev",
"use_edit_page_button": True,
"show_toc_level": 1,
# "navbar_align": "right", # For testing that the navbar items align properly
}
html_context = {
"github_user": "scikit-learn-contrib",
"github_repo": "imbalanced-learn",
"github_version": "master",
"doc_path": "doc",
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "imbalanced-learndoc"
# -- Options for autodoc ------------------------------------------------------
autodoc_default_options = {
"members": True,
"inherited-members": True,
}
# generate autosummary even if no references
autosummary_generate = True
# -- Options for numpydoc -----------------------------------------------------
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_show_class_members = False
# -- Options for sphinxcontrib-bibtex -----------------------------------------
# bibtex file
bibtex_bibfiles = ["bibtex/refs.bib"]
# -- Options for intersphinx --------------------------------------------------
# intersphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"matplotlib": ("https://matplotlib.org/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"joblib": ("https://joblib.readthedocs.io/en/latest/", None),
"seaborn": ("https://seaborn.pydata.org/", None),
}
# -- Options for sphinx-gallery -----------------------------------------------
# Generate the plot for the gallery
plot_gallery = True
# sphinx-gallery configuration
sphinx_gallery_conf = {
"doc_module": "imblearn",
"backreferences_dir": os.path.join("references/generated"),
"show_memory": True,
"reference_url": {"imblearn": None},
}
# -- Options for github link for what's new -----------------------------------
# Config for sphinx_issues
issues_uri = "https://github.com/scikit-learn-contrib/imbalanced-learn/issues/{issue}"
issues_github_path = "scikit-learn-contrib/imbalanced-learn"
issues_user_uri = "https://github.com/{user}"
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
"imblearn",
"https://github.com/scikit-learn-contrib/"
"imbalanced-learn/blob/{revision}/"
"{package}/{path}#L{lineno}",
)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"imbalanced-learn.tex",
"imbalanced-learn Documentation",
"The imbalanced-learn developers",
"manual",
),
]
# -- Options for manual page output ---------------------------------------
# If false, no module index is generated.
# latex_domain_indices = True
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"imbalanced-learn",
"imbalanced-learn Documentation",
["The imbalanced-learn developers"],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "imbalanced-learn",
        "imbalanced-learn Documentation",
        # Fixed typo "developerss" -> "developers" so the author string matches
        # the one used for the LaTeX and man-page outputs above.
        "The imbalanced-learn developers",
        "imbalanced-learn",
        "Toolbox for imbalanced dataset in machine learning.",
        "Miscellaneous",
    ),
]
# -- Dependencies generation ----------------------------------------------
def generate_min_dependency_table(app):
    """Generate the reST table of minimum dependency versions for the docs.

    The table is derived from scikit-learn's pinned minimum versions and is
    written to ``min_dependency_table.rst`` in the current directory.

    Parameters
    ----------
    app : sphinx.application.Sphinx
        The Sphinx application (unused; required by the event signature).
    """
    from sklearn._min_dependencies import dependent_packages

    # Column widths: longest entry in each column plus padding so the reST
    # grid lines up.
    package_header_len = max(len(package) for package in dependent_packages) + 4
    version_header_len = len("Minimum Version") + 4
    tags_header_len = max(len(tags) for _, tags in dependent_packages.values()) + 4

    # The identical "==== ==== ====" separator row delimits the table three
    # times; build it once instead of three copy-pasted joins.
    separator = " ".join(
        ["=" * package_header_len, "=" * version_header_len, "=" * tags_header_len]
    )

    output = StringIO()
    output.write(separator)
    output.write("\n")
    dependency_title = "Dependency"
    version_title = "Minimum Version"
    tags_title = "Purpose"
    output.write(
        f"{dependency_title:<{package_header_len}} "
        f"{version_title:<{version_header_len}} "
        f"{tags_title}\n"
    )
    output.write(separator)
    output.write("\n")
    # One row per dependency: name, minimum version, purpose tags.
    for package, (version, tags) in dependent_packages.items():
        output.write(
            f"{package:<{package_header_len}} {version:<{version_header_len}} {tags}\n"
        )
    output.write(separator)
    output.write("\n")

    with (Path(".") / "min_dependency_table.rst").open("w") as f:
        f.write(output.getvalue())
def generate_min_dependency_substitutions(app):
    """Write reST substitutions mapping each dependency to its minimum version."""
    from sklearn._min_dependencies import dependent_packages

    # One ``|<Package>MinVersion|`` substitution line per dependency.
    lines = [
        f".. |{package.capitalize()}MinVersion| replace:: {version}\n"
        for package, (version, _) in dependent_packages.items()
    ]
    with (Path(".") / "min_dependency_substitutions.rst").open("w") as f:
        f.write("".join(lines))
# -- Additional temporary hacks -----------------------------------------------
# Temporary work-around for spacing problem between parameter and parameter
# type in the doc, see https://github.com/numpy/numpydoc/issues/215. The bug
# has been fixed in sphinx (https://github.com/sphinx-doc/sphinx/pull/5976) but
# through a change in sphinx basic.css except rtd_theme does not use basic.css.
# In an ideal world, this would get fixed in this PR:
# https://github.com/readthedocs/sphinx_rtd_theme/pull/747/files
def setup(app):
    """Register the doc-generation hooks and extra stylesheet with Sphinx."""
    # Regenerate the dependency table and substitutions at each build start.
    for builder_hook in (
        generate_min_dependency_table,
        generate_min_dependency_substitutions,
    ):
        app.connect("builder-inited", builder_hook)
    app.add_css_file("basic.css")
| 10,651 | 30.514793 | 87 | py |
imbalanced-learn | imbalanced-learn-master/doc/sphinxext/github_link.py | import inspect
import os
import subprocess
import sys
from functools import partial
from operator import attrgetter
REVISION_CMD = "git rev-parse --short HEAD"
def _get_git_revision():
    """Return the short git hash of HEAD, or None if git cannot be run."""
    try:
        raw = subprocess.check_output(REVISION_CMD.split())
    except (subprocess.CalledProcessError, OSError):
        # git is missing or this is not a repository: linking is disabled.
        print("Failed to execute git to get revision")
        return None
    return raw.strip().decode("utf-8")
def _linkcode_resolve(domain, info, package, url_fmt, revision):
"""Determine a link to online source for a class/method/function
This is called by sphinx.ext.linkcode
An example with a long-untouched module that everyone has
>>> _linkcode_resolve('py', {'module': 'tty',
... 'fullname': 'setraw'},
... package='tty',
... url_fmt='http://hg.python.org/cpython/file/'
... '{revision}/Lib/{package}/{path}#L{lineno}',
... revision='xxxx')
'http://hg.python.org/cpython/file/xxxx/Lib/tty/tty.py#L18'
"""
if revision is None:
return
if domain not in ("py", "pyx"):
return
if not info.get("module") or not info.get("fullname"):
return
class_name = info["fullname"].split(".")[0]
if type(class_name) != str:
# Python 2 only
class_name = class_name.encode("utf-8")
module = __import__(info["module"], fromlist=[class_name])
obj = attrgetter(info["fullname"])(module)
try:
fn = inspect.getsourcefile(obj)
except Exception:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except Exception:
fn = None
if not fn:
return
fn = os.path.relpath(fn, start=os.path.dirname(__import__(package).__file__))
try:
lineno = inspect.getsourcelines(obj)[1]
except Exception:
lineno = ""
return url_fmt.format(revision=revision, package=package, path=fn, lineno=lineno)
def make_linkcode_resolve(package, url_fmt):
    """Return a ``linkcode_resolve`` callable for the given URL format.

    The current git revision (hash or name) is captured once at creation
    time. ``package`` is the name of the root module of the package and
    ``url_fmt`` is along the lines of
    ``'https://github.com/USER/PROJECT/blob/{revision}/{package}/{path}#L{lineno}'``.
    """
    return partial(
        _linkcode_resolve,
        revision=_get_git_revision(),
        package=package,
        url_fmt=url_fmt,
    )
| 2,616 | 30.154762 | 85 | py |
imbalanced-learn | imbalanced-learn-master/doc/sphinxext/sphinx_issues.py | # -*- coding: utf-8 -*-
"""A Sphinx extension for linking to your project's issue tracker.
Copyright 2014 Steven Loria
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import re
from docutils import nodes, utils
from sphinx.util.nodes import split_explicit_title
__version__ = "1.2.0"
__author__ = "Steven Loria"
__license__ = "MIT"
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Sphinx role for linking to a user profile. Defaults to linking to
    Github profiles, but the profile URIS can be configured via the
    ``issues_user_uri`` config value.

    Examples: ::

        :user:`sloria`

    Anchor text also works: ::

        :user:`Steven Loria <sloria>`
    """
    options = options or {}
    has_explicit_title, title, target = split_explicit_title(text)
    target = utils.unescape(target).strip()
    title = utils.unescape(title).strip()

    # Build the profile URL from the configured template, or GitHub by default.
    config = inliner.document.settings.env.app.config
    if config.issues_user_uri:
        ref = config.issues_user_uri.format(user=target)
    else:
        ref = "https://github.com/{0}".format(target)

    # Explicit anchor text wins; otherwise render as "@<username>".
    link_text = title if has_explicit_title else "@{0}".format(target)
    node = nodes.reference(text=link_text, refuri=ref, **options)
    return [node], []
def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Sphinx role for linking to a CVE on https://cve.mitre.org.

    Examples: ::

        :cve:`CVE-2018-17175`
    """
    options = options or {}
    has_explicit_title, title, target = split_explicit_title(text)
    target = utils.unescape(target).strip()
    title = utils.unescape(title).strip()

    ref = "https://cve.mitre.org/cgi-bin/cvename.cgi?name={0}".format(target)
    node = nodes.reference(
        text=title if has_explicit_title else target, refuri=ref, **options
    )
    return [node], []
class IssueRole(object):
    """Factory for Sphinx roles that link to issues, pull requests, commits.

    Each instance is configured with the config option holding a URI
    template, the keyword used to format that template, and a fallback
    GitHub URI template used together with ``issues_github_path``.
    """

    #: Matches cross-repo references such as ``sloria/konch#123`` or
    #: ``sloria/konch@abc1234`` into (user, repo, separator, number/sha).
    EXTERNAL_REPO_REGEX = re.compile(r"^(\w+)/(.+)([#@])([\w]+)$")

    def __init__(
        self,
        uri_config_option,
        format_kwarg,
        github_uri_template,
        format_text=None,
    ):
        self.uri_config_option = uri_config_option
        self.format_kwarg = format_kwarg
        self.github_uri_template = github_uri_template
        self.format_text = format_text or self.default_format_text

    @staticmethod
    def default_format_text(issue_no):
        """Render an issue number as anchor text, e.g. ``#123``."""
        return "#{0}".format(issue_no)

    def make_node(self, name, issue_no, config, options=None):
        """Build one reference node for *issue_no*.

        Returns ``None`` for the placeholder values ``-`` and ``0``.
        """
        name_map = {"pr": "pull", "issue": "issues", "commit": "commit"}
        options = options or {}
        external = self.EXTERNAL_REPO_REGEX.match(issue_no)
        if external:  # Reference into another GitHub repository.
            username, repo, symbol, issue = external.groups()
            if name not in name_map:
                raise ValueError(
                    "External repo linking not supported for :{}:".format(name)
                )
            ref = "https://github.com/{issues_github_path}/{path}/{n}".format(
                issues_github_path="{}/{}".format(username, repo),
                path=name_map.get(name),
                n=issue,
            )
            formatted_issue = self.format_text(issue).lstrip("#")
            text = "{username}/{repo}{symbol}{formatted_issue}".format(
                username=username,
                repo=repo,
                symbol=symbol,
                formatted_issue=formatted_issue,
            )
            return nodes.reference(text=text, refuri=ref, **options)
        if issue_no in ("-", "0"):
            # Placeholder entries produce no link node at all.
            return None
        uri_template = getattr(config, self.uri_config_option, None)
        if uri_template:
            ref = uri_template.format(**{self.format_kwarg: issue_no})
        elif config.issues_github_path:
            ref = self.github_uri_template.format(
                issues_github_path=config.issues_github_path, n=issue_no
            )
        else:
            raise ValueError(
                "Neither {} nor issues_github_path "
                "is set".format(self.uri_config_option)
            )
        return nodes.reference(text=self.format_text(issue_no), refuri=ref, **options)

    def __call__(
        self, name, rawtext, text, lineno, inliner, options=None, content=None
    ):
        """Role entry point; handles comma-separated reference lists."""
        options = options or {}
        content = content or []
        issue_nos = [part.strip() for part in utils.unescape(text).split(",")]
        config = inliner.document.settings.env.app.config
        ret = []
        last_index = len(issue_nos) - 1
        for i, issue_no in enumerate(issue_nos):
            ret.append(self.make_node(name, issue_no, config, options=options))
            if i != last_index:
                ret.append(nodes.raw(text=", ", format="html"))
        return ret, []
"""Sphinx role for linking to an issue. Must have
`issues_uri` or `issues_github_path` configured in ``conf.py``.
Examples: ::
:issue:`123`
:issue:`42,45`
:issue:`sloria/konch#123`
"""
issue_role = IssueRole(
uri_config_option="issues_uri",
format_kwarg="issue",
github_uri_template="https://github.com/{issues_github_path}/issues/{n}",
)
"""Sphinx role for linking to a pull request. Must have
`issues_pr_uri` or `issues_github_path` configured in ``conf.py``.
Examples: ::
:pr:`123`
:pr:`42,45`
:pr:`sloria/konch#43`
"""
pr_role = IssueRole(
uri_config_option="issues_pr_uri",
format_kwarg="pr",
github_uri_template="https://github.com/{issues_github_path}/pull/{n}",
)
def format_commit_text(sha):
    """Abbreviate a commit *sha* to its first seven characters for display."""
    short_sha = sha[:7]
    return short_sha
"""Sphinx role for linking to a commit. Must have
`issues_pr_uri` or `issues_github_path` configured in ``conf.py``.
Examples: ::
:commit:`123abc456def`
:commit:`sloria/konch@123abc456def`
"""
commit_role = IssueRole(
uri_config_option="issues_commit_uri",
format_kwarg="commit",
github_uri_template="https://github.com/{issues_github_path}/commit/{n}",
format_text=format_commit_text,
)
def setup(app):
    """Register the extension's config values and roles with Sphinx.

    Config values are URI format templates, e.g.
    'https://github.com/sloria/marshmallow/issues/{issue}', plus the
    'sloria/marshmallow'-style GitHub shortcut ``issues_github_path``.
    """
    config_options = (
        "issues_uri",          # issue URI template, keyed by {issue}
        "issues_pr_uri",       # pull-request URI template, keyed by {pr}
        "issues_commit_uri",   # commit URI template, keyed by {commit}
        "issues_github_path",  # GitHub shortcut, e.g. 'sloria/marshmallow'
        "issues_user_uri",     # user profile URI template, keyed by {user}
    )
    for option in config_options:
        app.add_config_value(option, default=None, rebuild="html")
    for role_name, role in (
        ("issue", issue_role),
        ("pr", pr_role),
        ("user", user_role),
        ("commit", commit_role),
        ("cve", cve_role),
    ):
        app.add_role(role_name, role)
    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
| 8,183 | 35.535714 | 82 | py |
ML-Doctor | ML-Doctor-main/demo.py | import os
import sys
import torch
import argparse
import torch.nn as nn
import torchvision.models as models
from doctor.meminf import *
from doctor.modinv import *
from doctor.attrinf import *
from doctor.modsteal import *
from demoloader.train import *
from demoloader.DCGAN import *
from utils.define_models import *
from demoloader.dataloader import *
def train_model(PATH, device, train_set, test_set, model, use_DP, noise, norm, delta):
    """Train the target *model* for 100 epochs and save it to PATH + "_target.pth".

    Returns ``(train accuracy, test accuracy, overfitting gap)`` measured at
    the final epoch.
    """
    loader_train = torch.utils.data.DataLoader(
        train_set, batch_size=64, shuffle=True, num_workers=2)
    loader_test = torch.utils.data.DataLoader(
        test_set, batch_size=64, shuffle=True, num_workers=2)
    trainer = model_training(loader_train, loader_test, model, device, use_DP, noise, norm, delta)
    acc_train = 0
    acc_test = 0
    for epoch in range(100):
        print("<======================= Epoch " + str(epoch + 1) + " =======================>")
        print("target training")
        acc_train = trainer.train()
        print("target testing")
        acc_test = trainer.test()
        # Gap between train and test accuracy, used as an overfitting proxy.
        overfitting = round(acc_train - acc_test, 6)
        print('The overfitting rate is %s' % overfitting)
    trainer.saveModel(PATH + "_target.pth")
    print("Saved target model!!!")
    print("Finished training!!!")
    return acc_train, acc_test, overfitting
def train_DCGAN(PATH, device, train_set, name):
    """Train a DCGAN on *train_set* for 200 epochs.

    Saves the discriminator/generator to PATH + "_discriminator.pth" /
    PATH + "_generator.pth". FashionMNIST uses dedicated D/G architectures.
    """
    loader = torch.utils.data.DataLoader(
        train_set, batch_size=128, shuffle=True, num_workers=2)
    if name.lower() == 'fmnist':
        D = FashionDiscriminator().eval()
        G = FashionGenerator().eval()
    else:
        D = Discriminator(ngpu=1).eval()
        G = Generator(ngpu=1).eval()
    print("Starting Training DCGAN...")
    GAN = GAN_training(loader, D, G, device)
    for epoch in range(200):
        print("<======================= Epoch " + str(epoch + 1) + " =======================>")
        GAN.train()
    GAN.saveModel(PATH + "_discriminator.pth", PATH + "_generator.pth")
def test_meminf(PATH, device, num_classes, target_train, target_test, shadow_train, shadow_test, target_model, shadow_model, train_shadow, use_DP, noise, norm, delta, mode):
    """Run a membership-inference attack against the trained target model.

    Modes (dispatched to attack_mode* from doctor.meminf):
      0 -- black-box with a shadow model (shadow-assisted attack dataset)
      1 -- black-box without a shadow model (partial attack)
      2 -- white-box without a shadow model (uses target gradients)
      3 -- white-box with a shadow model

    When *train_shadow* is set, a shadow model is trained first on the
    shadow split with the same optimizer settings as the target.
    """
    batch_size = 64
    if train_shadow:
        shadow_trainloader = torch.utils.data.DataLoader(
            shadow_train, batch_size=batch_size, shuffle=True, num_workers=2)
        shadow_testloader = torch.utils.data.DataLoader(
            shadow_test, batch_size=batch_size, shuffle=True, num_workers=2)
        loss = nn.CrossEntropyLoss()
        optimizer = optim.SGD(shadow_model.parameters(), lr=1e-2, momentum=0.9, weight_decay=5e-4)
        train_shadow_model(PATH, device, shadow_model, shadow_trainloader, shadow_testloader, use_DP, noise, norm, loss, optimizer, delta)
    # Shadow-assisted modes (0 and 3) build the attack set from both the
    # target and shadow splits; modes 1 and 2 only use the target split.
    if mode == 0 or mode == 3:
        attack_trainloader, attack_testloader = get_attack_dataset_with_shadow(
            target_train, target_test, shadow_train, shadow_test, batch_size)
    else:
        attack_trainloader, attack_testloader = get_attack_dataset_without_shadow(target_train, target_test, batch_size)
    #for white box
    if mode == 2 or mode == 3:
        gradient_size = get_gradient_size(target_model)
        # NOTE(review): presumably the flattened size of the pooled gradient
        # map fed to WhiteBoxAttackModel -- confirm against its expected input.
        total = gradient_size[0][0] // 2 * gradient_size[0][1] // 2
    if mode == 0:
        attack_model = ShadowAttackModel(num_classes)
        attack_mode0(PATH + "_target.pth", PATH + "_shadow.pth", PATH, device, attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, 1, num_classes)
    elif mode == 1:
        attack_model = PartialAttackModel(num_classes)
        attack_mode1(PATH + "_target.pth", PATH, device, attack_trainloader, attack_testloader, target_model, attack_model, 1, num_classes)
    elif mode == 2:
        attack_model = WhiteBoxAttackModel(num_classes, total)
        attack_mode2(PATH + "_target.pth", PATH, device, attack_trainloader, attack_testloader, target_model, attack_model, 1, num_classes)
    elif mode == 3:
        attack_model = WhiteBoxAttackModel(num_classes, total)
        attack_mode3(PATH + "_target.pth", PATH + "_shadow.pth", PATH, device,
                    attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, 1, num_classes)
    else:
        raise Exception("Wrong mode")
    # attack_mode0(PATH + "_target.pth", PATH + "_shadow.pth", PATH, device, attack_trainloader, attack_testloader, target_model, shadow_model, attack_model, 1, num_classes)
    # attack_mode1(PATH + "_target.pth", PATH, device, attack_trainloader, attack_testloader, target_model, attack_model, 1, num_classes)
    # attack_mode2(PATH + "_target.pth", PATH, device, attack_trainloader, attack_testloader, target_model, attack_model, 1, num_classes)
def test_modinv(PATH, device, num_classes, target_train, target_model, name):
    """Run two model-inversion attacks against the trained target model.

    First the Fredrikson et al. (CCS'15) MSE-based inversion, then the
    GAN-based "Secret Revealer" inversion using the DCGAN previously saved
    by train_DCGAN.
    """
    # Single-sample input shape for the inversion optimizer: (1, C, H, W).
    size = (1,) + tuple(target_train[0][0].shape)
    target_model, evaluation_model = load_data(PATH + "_target.pth", PATH + "_eval.pth", target_model, models.resnet18(num_classes=num_classes))
    # CCS 15
    modinv_ccs = ccs_inversion(target_model, size, num_classes, 1, 3000, 100, 0.001, 0.003, device)
    train_loader = torch.utils.data.DataLoader(target_train, batch_size=1, shuffle=False)
    # NOTE(review): ccs_result is computed but never used/returned here --
    # presumably reverse_mse reports its metrics itself; confirm.
    ccs_result = modinv_ccs.reverse_mse(train_loader)
    # Secret Revealer
    if name.lower() == 'fmnist':
        # NOTE(review): train_DCGAN constructs FashionDiscriminator() with no
        # ngpu argument while this passes ngpu=1 -- verify both signatures.
        D = FashionDiscriminator(ngpu=1).eval()
        G = FashionGenerator(ngpu=1).eval()
    else:
        D = Discriminator(ngpu=1).eval()
        G = Generator(ngpu=1).eval()
    PATH_D = PATH + "_discriminator.pth"
    PATH_G = PATH + "_generator.pth"
    D, G, iden = prepare_GAN(name, D, G, PATH_D, PATH_G)
    # NOTE(review): the revealer object is constructed but its attack is not
    # invoked here -- confirm whether a call (e.g. .reverse()) is missing.
    modinv_revealer = revealer_inversion(G, D, target_model, evaluation_model, iden, device)
def test_attrinf(PATH, device, num_classes, target_train, target_test, target_model):
    """Run the attribute-inference attack.

    Trains the attack head on a random half of the target's training split
    and evaluates on the target test split; the attack predicts the second
    attribute (num_classes[1]).
    """
    n_attack = int(0.5 * len(target_train))
    attack_train, _ = torch.utils.data.random_split(
        target_train, [n_attack, len(target_train) - n_attack])
    attack_trainloader = torch.utils.data.DataLoader(
        attack_train, batch_size=64, shuffle=True, num_workers=2)
    attack_testloader = torch.utils.data.DataLoader(
        target_test, batch_size=64, shuffle=True, num_workers=2)
    # Batch-of-one input shape for building the attack model: [1, C, H, W].
    image_size = [1] + list(target_train[0][0].shape)
    train_attack_model(
        PATH + "_target.pth", PATH, num_classes[1], device, target_model,
        attack_trainloader, attack_testloader, image_size)
def test_modsteal(PATH, device, train_set, test_set, target_model, attack_model):
    """Run the model-stealing attack: train *attack_model* to mimic the target.

    The stolen model is fitted for 100 epochs against the target's outputs
    (MSE), saved to PATH + "_modsteal.pth", and evaluated for accuracy and
    prediction agreement.
    """
    loader_train = torch.utils.data.DataLoader(
        train_set, batch_size=64, shuffle=True, num_workers=2)
    loader_test = torch.utils.data.DataLoader(
        test_set, batch_size=64, shuffle=True, num_workers=2)
    criterion = nn.MSELoss()
    sgd = optim.SGD(attack_model.parameters(), lr=0.01, momentum=0.9)
    attacking = train_steal_model(
        loader_train, loader_test, target_model, attack_model,
        PATH + "_target.pth", PATH + "_modsteal.pth", device, 64, criterion, sgd)
    for epoch in range(100):
        print("[Epoch %d/%d] attack training"%((epoch+1), 100))
        attacking.train_with_same_distribution()
    print("Finished training!!!")
    attacking.saveModel()
    acc_test, agreement_test = attacking.test()
    print("Saved Target Model!!!\nstolen test acc = %.3f, stolen test agreement = %.3f\n"%(acc_test, agreement_test))
def str_to_bool(string):
    """Parse common truthy/falsy spellings into a bool (argparse type helper).

    Accepts bools unchanged; otherwise 'yes'/'true'/'t'/'y'/'1' map to True
    and 'no'/'false'/'f'/'n'/'0' map to False (case-insensitive).

    Raises argparse.ArgumentTypeError for anything else.
    """
    if isinstance(string, bool):
        return string
    lowered = string.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def main():
    """CLI entry point: prepare data/models, then run the selected attack.

    --attack_type selects: 0 membership inference, 1 model inversion,
    2 attribute inference, 3 model stealing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-g', '--gpu', type=str, default="0")
    parser.add_argument('-a', '--attributes', type=str, default="race", help="For attrinf, two attributes should be in format x_y e.g. race_gender")
    parser.add_argument('-dn', '--dataset_name', type=str, default="UTKFace")
    parser.add_argument('-at', '--attack_type', type=int, default=0)
    parser.add_argument('-tm', '--train_model', action='store_true')
    parser.add_argument('-ts', '--train_shadow', action='store_true')
    parser.add_argument('-ud', '--use_DP', action='store_true',)
    parser.add_argument('-ne', '--noise', type=float, default=1.3)
    parser.add_argument('-nm', '--norm', type=float, default=1.5)
    parser.add_argument('-d', '--delta', type=float, default=1e-5)
    parser.add_argument('-m', '--mode', type=int, default=0)
    args = parser.parse_args()
    # Pin the chosen GPU; "cuda:0" then refers to the selected device.
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    device = torch.device("cuda:0")
    dataset_name = args.dataset_name
    attr = args.attributes
    # "race_gender" style arguments become a list of attribute names.
    if "_" in attr:
        attr = attr.split("_")
    root = "../data"
    use_DP = args.use_DP
    noise = args.noise
    norm = args.norm
    delta = args.delta
    mode = args.mode
    train_shadow = args.train_shadow
    TARGET_ROOT = "./demoloader/trained_model/"
    if not os.path.exists(TARGET_ROOT):
        print(f"Create directory named {TARGET_ROOT}")
        os.makedirs(TARGET_ROOT)
    TARGET_PATH = TARGET_ROOT + dataset_name
    num_classes, target_train, target_test, shadow_train, shadow_test, target_model, shadow_model = prepare_dataset(dataset_name, attr, root)
    if args.train_model:
        train_model(TARGET_PATH, device, target_train, target_test, target_model, use_DP, noise, norm, delta)
    # membership inference
    if args.attack_type == 0:
        test_meminf(TARGET_PATH, device, num_classes, target_train, target_test, shadow_train, shadow_test, target_model, shadow_model, train_shadow, use_DP, noise, norm, delta, mode)
    # model inversion
    elif args.attack_type == 1:
        train_DCGAN(TARGET_PATH, device, shadow_test + shadow_train, dataset_name)
        test_modinv(TARGET_PATH, device, num_classes, target_train, target_model, dataset_name)
    # attribute inference
    elif args.attack_type == 2:
        test_attrinf(TARGET_PATH, device, num_classes, target_train, target_test, target_model)
    # model stealing
    elif args.attack_type == 3:
        test_modsteal(TARGET_PATH, device, shadow_train+shadow_test, target_test, target_model, shadow_model)
    else:
        sys.exit("we have not supported this mode yet! 0c0")
    # target_model = models.resnet18(num_classes=num_classes)
    # train_model(TARGET_PATH, device, target_train + shadow_train, target_test + shadow_test, target_model)
| 10,614 | 41.290837 | 183 | py |
ML-Doctor | ML-Doctor-main/demoloader/dataloader.py | import os
import torch
import pandas
import torchvision
torch.manual_seed(0)
import torch.nn as nn
import PIL.Image as Image
import torchvision.transforms as transforms
from functools import partial
from typing import Any, Callable, List, Optional, Union, Tuple
class CNN(nn.Module):
    """Small convolutional classifier for 64x64 images.

    Three conv/ReLU/max-pool stages (32, 64, 128 channels, 3x3 kernels)
    followed by a two-layer MLP head. The flattened feature size 128*6*6
    assumes 64x64 spatial input.
    """

    def __init__(self, input_channel=3, num_classes=10):
        super(CNN, self).__init__()
        stages = []
        for in_ch, out_ch in ((input_channel, 32), (32, 64), (64, 128)):
            stages.append(nn.Conv2d(in_ch, out_ch, kernel_size=3))
            stages.append(nn.ReLU())
            stages.append(nn.MaxPool2d(kernel_size=2))
        self.features = nn.Sequential(*stages)
        self.classifier = nn.Sequential(
            nn.Linear(128 * 6 * 6, 512),
            nn.ReLU(),
            nn.Linear(512, num_classes),
        )

    def forward(self, x):
        feature_map = self.features(x)
        flattened = torch.flatten(feature_map, 1)
        return self.classifier(flattened)
class UTKFaceDataset(torch.utils.data.Dataset):
    """UTKFace dataset yielding face images with age/gender/race targets.

    Expects ``<root>/UTKFace/processed/`` to contain text files listing
    image names and ``<root>/UTKFace/raw/`` to contain the aligned+cropped
    images named ``<name>.chip.jpg``.
    """

    def __init__(self, root, attr: Union[List[str], str] = "gender", transform=None, target_transform=None)-> None:
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.processed_path = os.path.join(self.root, 'UTKFace/processed/')
        self.files = os.listdir(self.processed_path)
        # Accept a single attribute name or a list of them.
        if isinstance(attr, list):
            self.attr = attr
        else:
            self.attr = [attr]
        self.lines = []
        for txt_file in self.files:
            txt_file_path = os.path.join(self.processed_path, txt_file)
            with open(txt_file_path, 'r') as f:
                assert f is not None
                for i in f:
                    # Image names encode labels as "<age>_<gender>_<race>_<rest>.jpg";
                    # skip malformed entries and race codes outside 0-3.
                    image_name = i.split('jpg ')[0]
                    attrs = image_name.split('_')
                    if len(attrs) < 4 or int(attrs[2]) >= 4 or '' in attrs:
                        continue
                    self.lines.append(image_name+'jpg')

    def __len__(self):
        return len(self.lines)

    def __getitem__(self, index:int)-> Tuple[Any, Any]:
        """Return ``(image, target)``; target layout follows ``self.attr``."""
        attrs = self.lines[index].split('_')
        age = int(attrs[0])
        gender = int(attrs[1])
        race = int(attrs[2])
        image_path = os.path.join(self.root, 'UTKFace/raw/', self.lines[index]+'.chip.jpg').rstrip()
        image = Image.open(image_path).convert('RGB')
        target: Any = []
        for t in self.attr:
            if t == "age":
                target.append(age)
            elif t == "gender":
                target.append(gender)
            elif t == "race":
                target.append(race)
            else:
                raise ValueError("Target type \"{}\" is not recognized.".format(t))
        if self.transform:
            image = self.transform(image)
        if target:
            # Single attribute -> scalar; several -> tuple of labels.
            target = tuple(target) if len(target) > 1 else target[0]
            if self.target_transform is not None:
                target = self.target_transform(target)
        else:
            target = None
        return image, target
class CelebA(torch.utils.data.Dataset):
    """CelebA dataset whose target packs several binary attributes into one label.

    For each group of attribute indices in *attr_list*, the binary attribute
    values are combined into a single integer (bit i <- i-th index in the
    group), so a group of k attributes yields a label in [0, 2**k).
    """

    base_folder = "celeba"

    def __init__(
        self,
        root: str,
        attr_list: str,
        target_type: Union[List[str], str] = "attr",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        if isinstance(target_type, list):
            self.target_type = target_type
        else:
            self.target_type = [target_type]
        self.root = root
        self.transform = transform
        self.target_transform =target_transform
        self.attr_list = attr_list
        # Annotation files live under <root>/celeba/.
        fn = partial(os.path.join, self.root, self.base_folder)
        splits = pandas.read_csv(fn("list_eval_partition.txt"), delim_whitespace=True, header=None, index_col=0)
        attr = pandas.read_csv(fn("list_attr_celeba.txt"), delim_whitespace=True, header=1)
        # slice(None) keeps every split (train/valid/test are not separated).
        mask = slice(None)
        self.filename = splits[mask].index.values
        self.attr = torch.as_tensor(attr[mask].values)
        self.attr = (self.attr + 1) // 2  # map from {-1, 1} to {0, 1}
        self.attr_names = list(attr.columns)

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Return ``(image, target)`` with one packed label per attr group."""
        X = Image.open(os.path.join(self.root, self.base_folder, "img_celeba", self.filename[index]))
        target: Any = []
        for t, nums in zip(self.target_type, self.attr_list):
            if t == "attr":
                # Pack the group's binary attributes into one integer label.
                final_attr = 0
                for i in range(len(nums)):
                    final_attr += 2 ** i * self.attr[index][nums[i]]
                target.append(final_attr)
            else:
                # TODO: refactor with utils.verify_str_arg
                raise ValueError("Target type \"{}\" is not recognized.".format(t))
        if self.transform is not None:
            X = self.transform(X)
        if target:
            target = tuple(target) if len(target) > 1 else target[0]
            if self.target_transform is not None:
                target = self.target_transform(target)
        else:
            target = None
        return X, target

    def __len__(self) -> int:
        return len(self.attr)

    def extra_repr(self) -> str:
        lines = ["Target type: {target_type}", "Split: {split}"]
        return '\n'.join(lines).format(**self.__dict__)
def prepare_dataset(dataset, attr, root):
    """Split the dataset into four equal target/shadow train/test partitions.

    Returns ``(num_classes, target_train, target_test, shadow_train,
    shadow_test, target_model, shadow_model)``; any remainder samples after
    the four equal quarters are discarded.
    """
    num_classes, full_set, target_model, shadow_model = get_model_dataset(dataset, attr=attr, root=root)
    quarter = len(full_set) // 4
    leftover = len(full_set) - quarter * 4
    parts = torch.utils.data.random_split(
        full_set, [quarter, quarter, quarter, quarter, leftover])
    target_train, target_test, shadow_train, shadow_test = parts[0], parts[1], parts[2], parts[3]
    return num_classes, target_train, target_test, shadow_train, shadow_test, target_model, shadow_model
def get_model_dataset(dataset_name, attr, root):
    """Build the dataset and a matching pair of identical CNNs.

    Parameters
    ----------
    dataset_name : str
        One of "utkface", "celeba", "stl10", "fmnist" (case-insensitive).
    attr : str or list of str
        Target attribute(s); meaningful for UTKFace ("age"/"gender"/"race")
        and CelebA ("attr"). Ignored by STL10 and FashionMNIST.
    root : str
        Root directory holding (or receiving) the dataset files.

    Returns
    -------
    tuple
        ``(num_classes, dataset, target_model, shadow_model)``;
        ``num_classes`` is an int, or a list of ints when *attr* is a list.

    Raises
    ------
    ValueError
        If *dataset_name* or an attribute name is not recognized.
    """
    name = dataset_name.lower()
    if name == "utkface":
        if isinstance(attr, list):
            num_classes = []
            for a in attr:
                if a == "age":
                    num_classes.append(117)
                elif a == "gender":
                    num_classes.append(2)
                elif a == "race":
                    num_classes.append(4)
                else:
                    raise ValueError("Target type \"{}\" is not recognized.".format(a))
        else:
            if attr == "age":
                num_classes = 117
            elif attr == "gender":
                num_classes = 2
            elif attr == "race":
                num_classes = 4
            else:
                raise ValueError("Target type \"{}\" is not recognized.".format(attr))
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Resize((64, 64)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        dataset = UTKFaceDataset(root=root, attr=attr, transform=transform)
        input_channel = 3
    elif name == "celeba":
        if isinstance(attr, list):
            for a in attr:
                if a != "attr":
                    raise ValueError("Target type \"{}\" is not recognized.".format(a))
            num_classes = [8, 4]
            # heavyMakeup MouthSlightlyOpen Smiling, Male Young
            attr_list = [[18, 21, 31], [20, 39]]
        else:
            if attr == "attr":
                num_classes = 8
                attr_list = [[18, 21, 31]]
            else:
                raise ValueError("Target type \"{}\" is not recognized.".format(attr))
        transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.Resize((64, 64)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        dataset = CelebA(root=root, attr_list=attr_list, target_type=attr, transform=transform)
        input_channel = 3
    elif name == "stl10":
        num_classes = 10
        transform = transforms.Compose([
            transforms.Resize((64, 64)),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ])
        train_set = torchvision.datasets.STL10(
            root=root, split='train', transform=transform, download=True)
        test_set = torchvision.datasets.STL10(
            root=root, split='test', transform=transform, download=True)
        dataset = train_set + test_set
        input_channel = 3
    elif name == "fmnist":
        num_classes = 10
        transform = transforms.Compose([
            transforms.Resize((64, 64)),
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])
        train_set = torchvision.datasets.FashionMNIST(
            root=root, train=True, download=True, transform=transform)
        test_set = torchvision.datasets.FashionMNIST(
            root=root, train=False, download=True, transform=transform)
        dataset = train_set + test_set
        input_channel = 1
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on `dataset`/`input_channel`; fail fast instead.
        raise ValueError("Dataset \"{}\" is not recognized.".format(dataset_name))

    if isinstance(num_classes, int):
        target_model = CNN(input_channel=input_channel, num_classes=num_classes)
        shadow_model = CNN(input_channel=input_channel, num_classes=num_classes)
    else:
        # Multi-attribute case: the classifiers predict the first attribute;
        # further entries are consumed elsewhere (e.g. attribute inference).
        target_model = CNN(input_channel=input_channel, num_classes=num_classes[0])
        shadow_model = CNN(input_channel=input_channel, num_classes=num_classes[0])
    return num_classes, dataset, target_model, shadow_model
ML-Doctor | ML-Doctor-main/demoloader/train.py | import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
np.set_printoptions(threshold=np.inf)
from opacus import PrivacyEngine
from torch.optim import lr_scheduler
def GAN_init(m):
    """DCGAN weight initializer, intended for use with ``module.apply``.

    Conv-type layers get weights ~ N(0, 0.02); BatchNorm-type layers get
    weights ~ N(1, 0.02) and zero bias. Other modules are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class model_training():
    """Train/evaluate a classifier, optionally with DP-SGD via Opacus.

    Uses SGD (lr 1e-2, momentum 0.9, weight decay 5e-4) with a MultiStepLR
    schedule (x0.1 at epochs 50 and 75). When *use_DP* is set, wraps the
    optimizer/data loader with an Opacus ``PrivacyEngine``.
    """

    def __init__(self, trainloader, testloader, model, device, use_DP, noise, norm, delta):
        self.use_DP = use_DP
        self.device = device
        self.delta = delta  # target delta used when reporting epsilon
        self.net = model.to(self.device)
        self.trainloader = trainloader
        self.testloader = testloader
        # NOTE(review): this compares against the string 'cuda', but demo.py
        # passes torch.device("cuda:0"), so the DataParallel branch likely
        # never triggers -- confirm intent.
        if self.device == 'cuda':
            self.net = torch.nn.DataParallel(self.net)
            cudnn.benchmark = True
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.net.parameters(), lr=1e-2, momentum=0.9, weight_decay=5e-4)
        self.noise_multiplier, self.max_grad_norm = noise, norm
        if self.use_DP:
            self.privacy_engine = PrivacyEngine()
            # NOTE(review): make_private returns a wrapped module assigned to
            # self.model, yet train()/test() forward through self.net --
            # verify the DP per-sample gradient hooks are actually exercised.
            self.model, self.optimizer, self.trainloader = self.privacy_engine.make_private(
                module=model,
                optimizer=self.optimizer,
                data_loader=self.trainloader,
                noise_multiplier=self.noise_multiplier,
                max_grad_norm=self.max_grad_norm,
            )
            # Leftovers from the older Opacus (<1.0) API, kept for reference:
            # self.net = module_modification.convert_batchnorm_modules(self.net)
            # inspector = DPModelInspector()
            # inspector.validate(self.net)
            # privacy_engine = PrivacyEngine(
            #     self.net,
            #     batch_size=64,
            #     sample_size=len(self.trainloader.dataset),
            #     alphas=[1 + x / 10.0 for x in range(1, 100)] + list(range(12, 64)),
            #     noise_multiplier=self.noise_multiplier,
            #     max_grad_norm=self.max_grad_norm,
            #     secure_rng=False,
            # )
            print( 'noise_multiplier: %.3f | max_grad_norm: %.3f' % (self.noise_multiplier, self.max_grad_norm))
            # privacy_engine.attach(self.optimizer)
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, [50, 75], 0.1)

    # Training
    def train(self):
        """Run one training epoch; returns the epoch's training accuracy."""
        self.net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.trainloader):
            # Multi-attribute datasets yield a list of labels; train on the first.
            if isinstance(targets, list):
                targets = targets[0]
            # Dead branch under the CrossEntropyLoss set in __init__; would
            # one-hot encode targets for other criteria.
            # NOTE(review): relies on self.num_classes, which is never set here.
            if str(self.criterion) != "CrossEntropyLoss()":
                targets = torch.from_numpy(np.eye(self.num_classes)[targets]).float()
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.net(inputs)
            loss = self.criterion(outputs, targets)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            if str(self.criterion) != "CrossEntropyLoss()":
                _, targets= targets.max(1)
            correct += predicted.eq(targets).sum().item()
        if self.use_DP:
            # Report the privacy budget spent so far at the configured delta.
            epsilon = self.privacy_engine.accountant.get_epsilon(delta=self.delta)
            # epsilon, best_alpha = self.optimizer.privacy_engine.get_privacy_spent(1e-5)
            print("\u03B5: %.3f \u03B4: 1e-5" % (epsilon))
        self.scheduler.step()
        # NOTE(review): train_loss/batch_idx divides by the last batch index,
        # not the batch count (off by one; ZeroDivisionError for a single
        # batch) -- confirm whether this average is intentional.
        print( 'Train Acc: %.3f%% (%d/%d) | Loss: %.3f' % (100.*correct/total, correct, total, 1.*train_loss/batch_idx))
        return 1.*correct/total

    def saveModel(self, path):
        """Serialize the network's state_dict to *path*."""
        torch.save(self.net.state_dict(), path)

    def get_noise_norm(self):
        """Return the DP settings ``(noise_multiplier, max_grad_norm)``."""
        return self.noise_multiplier, self.max_grad_norm

    def test(self):
        """Evaluate on the held-out loader; returns test accuracy."""
        self.net.eval()
        test_loss = 0
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, targets in self.testloader:
                if isinstance(targets, list):
                    targets = targets[0]
                if str(self.criterion) != "CrossEntropyLoss()":
                    targets = torch.from_numpy(np.eye(self.num_classes)[targets]).float()
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.net(inputs)
                loss = self.criterion(outputs, targets)
                test_loss += loss.item()
                _, predicted = outputs.max(1)
                total += targets.size(0)
                if str(self.criterion) != "CrossEntropyLoss()":
                    _, targets= targets.max(1)
                correct += predicted.eq(targets).sum().item()
            print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))
        return 1.*correct/total
class distillation_training():
    """Knowledge distillation: train a student model against a frozen teacher.

    The teacher's weights are loaded from *PATH* and used in eval mode; the
    student is optimized with a blend of temperature-softened KL divergence
    and hard-label cross entropy (see ``distillation_loss``).
    """

    def __init__(self, PATH, trainloader, testloader, model, teacher, device):
        self.device = device
        self.model = model.to(self.device)
        self.trainloader = trainloader
        self.testloader = testloader
        self.PATH = PATH
        self.teacher = teacher.to(self.device)
        # Load the pretrained teacher weights; the teacher is never trained.
        self.teacher.load_state_dict(torch.load(self.PATH))
        # NOTE(review): compares a torch.device against the string 'cuda';
        # with torch.device("cuda:0") this branch does not trigger -- confirm.
        if self.device == 'cuda':
            self.model = torch.nn.DataParallel(self.model)
            cudnn.benchmark = True
        self.criterion = nn.KLDivLoss(reduction='batchmean')
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
        self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, [50, 100], 0.1)

    def distillation_loss(self, y, labels, teacher_scores, T, alpha):
        """Blend soft-target KL (temperature *T*, weight scaled by T^2 * 2 *
        alpha) with hard-label cross entropy (weight 1 - alpha)."""
        loss = self.criterion(F.log_softmax(y/T, dim=1), F.softmax(teacher_scores/T, dim=1))
        loss = loss * (T*T * 2.0 * alpha) + F.cross_entropy(y, labels) * (1. - alpha)
        return loss

    def train(self):
        """Run one distillation epoch; returns training accuracy."""
        self.model.train()
        self.teacher.eval()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(self.trainloader):
            inputs, targets = inputs.to(self.device), targets.to(self.device)
            self.optimizer.zero_grad()
            outputs = self.model(inputs)
            # Teacher logits are treated as constants (no gradient flow).
            teacher_output = self.teacher(inputs)
            teacher_output = teacher_output.detach()
            loss = self.distillation_loss(outputs, targets, teacher_output, T=20.0, alpha=0.7)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        self.scheduler.step()
        # NOTE(review): train_loss/batch_idx divides by the last batch index,
        # not the batch count -- same pattern as model_training; confirm.
        print( 'Train Acc: %.3f%% (%d/%d) | Loss: %.3f' % (100.*correct/total, correct, total, 1.*train_loss/batch_idx))
        return 1.*correct/total

    def saveModel(self, path):
        """Serialize the student model's state_dict to *path*."""
        torch.save(self.model.state_dict(), path)

    def test(self):
        """Evaluate the student on the held-out loader; returns accuracy."""
        self.model.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, targets in self.testloader:
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                outputs = self.model(inputs)
                _, predicted = outputs.max(1)
                total += targets.size(0)
                correct += predicted.eq(targets).sum().item()
            print( 'Test Acc: %.3f%% (%d/%d)' % (100.*correct/total, correct, total))
        return 1.*correct/total
class GAN_training():
    """DCGAN trainer: alternating discriminator/generator updates with BCE.

    Both models are moved to *device* and re-initialized with ``GAN_init``;
    each uses Adam with lr 2e-4 and betas (0.5, 0.999).
    """

    def __init__(self, trainloader, model_discriminator, model_generator, device):
        self.device = device
        self.trainloader = trainloader
        self.model_discriminator = model_discriminator.to(self.device)
        self.model_generator = model_generator.to(self.device)
        # Apply DCGAN weight initialization to every submodule.
        self.model_discriminator.apply(GAN_init)
        self.model_generator.apply(GAN_init)
        self.criterion = nn.BCELoss()
        self.optimizer_discriminator = optim.Adam(model_discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
        self.optimizer_generator = optim.Adam(model_generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
        self.real_label = 1.
        self.fake_label = 0.

    def train(self):
        """Run one epoch of alternating D/G updates over the train loader."""
        # For each batch in the dataloader
        for i, data in enumerate(self.trainloader, 0):
            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ############################
            self.model_discriminator.zero_grad()
            # Format batch
            real_cpu = data[0].to(self.device)
            b_size = real_cpu.size(0)
            label = torch.full((b_size,), self.real_label, dtype=torch.float, device=self.device)
            # Forward pass real batch through D
            output = self.model_discriminator(real_cpu).view(-1)
            # Calculate loss on all-real batch
            errD_real = self.criterion(output, label)
            # Calculate gradients for D in backward pass
            errD_real.backward()
            D_x = output.mean().item()
            ## Train with all-fake batch
            # Generate batch of latent vectors
            noise = torch.randn(b_size, 100, 1, 1, device=self.device)
            # Generate fake image batch with G
            fake = self.model_generator(noise)
            label.fill_(self.fake_label)
            # Classify all fake batch with D
            output = self.model_discriminator(fake.detach()).view(-1)
            # Calculate D's loss on the all-fake batch
            errD_fake = self.criterion(output, label)
            # Calculate the gradients for this batch, accumulated (summed) with previous gradients
            errD_fake.backward()
            D_G_z1 = output.mean().item()
            # Compute error of D as sum over the fake and the real batches
            errD = errD_real + errD_fake
            # Update D
            self.optimizer_discriminator.step()
            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            self.model_generator.zero_grad()
            label.fill_(self.real_label)  # fake labels are real for generator cost
            # Since we just updated D, perform another forward pass of all-fake batch through D
            output = self.model_discriminator(fake).view(-1)
            # Calculate G's loss based on this output
            errG = self.criterion(output, label)
            # Calculate gradients for G
            errG.backward()
            D_G_z2 = output.mean().item()
            # Update G
            self.optimizer_generator.step()
            # Output training stats
            if i % 50 == 0:
                print('[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                    % (i, len(self.trainloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
            # Save Losses for plotting later
            # G_losses.append(errG.item())
            # D_losses.append(errD.item())
            # Check how the generator is doing by saving G's output on fixed_noise
            # if (iters % 500 == 0) or ((epoch == 4) and (i == len(dataloader)-1)):
            #     with torch.no_grad():
            #         fake = netG(fixed_noise).detach().cpu()
            #     img_list.append(vutils.make_grid(fake, padding=2, normalize=True))

    def saveModel(self, path_d, path_g):
        """Serialize discriminator and generator state_dicts to the given paths."""
        torch.save(self.model_discriminator.state_dict(), path_d)
        torch.save(self.model_generator.state_dict(), path_g)
| 11,748 | 37.270358 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.