| repo (string, 2 to 99 chars) | file (string, 13 to 225 chars) | code (string, 0 to 18.3M chars) | file_length (int64, 0 to 18.3M) | avg_line_length (float64, 0 to 1.36M) | max_line_length (int64, 0 to 4.26M) | extension_type (string, 1 class) |
|---|---|---|---|---|---|---|
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_lgamma.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _lgamma_cpu(x, dtype):
from scipy import special
return numpy.vectorize(special.gammaln, otypes=[dtype])(x)
def _lgamma_gpu(x, dtype):
return cuda.to_gpu(_lgamma_cpu(cuda.to_cpu(x), dtype))
def _lgamma_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _lgamma_cpu(x, dtype)
else:
return _lgamma_gpu(x, dtype)
def make_data(shape, dtype):
x = numpy.random.uniform(1., 10., shape).astype(dtype)
gy = numpy.random.uniform(-1., 1., shape).astype(dtype)
ggx = numpy.random.uniform(-1., 1., shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.lgamma,
func_expected=_lgamma_expected,
make_data=make_data,
backward_options={'eps': 1e-3, 'atol': 5e-2, 'rtol': 1e-4,
'dtype': numpy.float64},
double_backward_options={'eps': 1e-3, 'atol': 5e-2, 'rtol': 1e-4,
'dtype': numpy.float64}
)
@testing.with_requires('scipy')
class TestLGamma(unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.without_requires('scipy')
class TestLGammaExceptions(unittest.TestCase):
def setUp(self):
self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
self.func = F.lgamma
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with self.assertRaises(ImportError):
self.func(x)
def test_forward_cpu(self):
self.check_forward(self.x)
testing.run_module(__name__, __file__)
| 1,790 | 24.956522 | 69 | py |
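A minimal sketch of the reference computation this row's test vectorizes, assuming SciPy is installed; the sample values are illustrative, not taken from the test:

import numpy
from scipy import special

# Elementwise log-gamma, the same reference as _lgamma_cpu above.
x = numpy.array([1.0, 2.5, 7.0], dtype=numpy.float32)
expected = numpy.vectorize(special.gammaln, otypes=[numpy.float32])(x)
print(expected)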
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_ndtr.py |
import math
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainer import utils
def _ndtr_cpu(x, dtype):
erfc = numpy.vectorize(
lambda x: 0.5 * math.erfc(-x / 2 ** 0.5))
return utils.force_array(erfc(x), dtype=dtype)
def _ndtr_gpu(x, dtype):
return cuda.to_gpu(_ndtr_cpu(cuda.to_cpu(x), dtype))
def _ndtr_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _ndtr_cpu(x, dtype)
else:
return _ndtr_gpu(x, dtype)
@testing.unary_math_function_unittest(
F.ndtr,
func_expected=_ndtr_expected,
)
class TestNdtr(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 760 | 18.512821 | 56 | py |
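The CPU reference above encodes the standard normal CDF as 0.5 * erfc(-x / sqrt(2)). A small self-check of that identity (input values chosen for illustration, not from the test):

import math

for x in (0.0, 1.0, -1.0):
    phi = 0.5 * math.erfc(-x / 2 ** 0.5)
    print(x, phi)  # 0.0 -> 0.5, 1.0 -> ~0.8413, -1.0 -> ~0.1587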
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_maximum.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'shape': [
# x1, x2, y
((3, 2), (3, 2), (3, 2)),
((), (), ()),
((3, 2), (3, 1), (3, 2)),
((2,), (3, 2), (3, 2)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestMaximum(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
eps = 1e-2
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-3})
self.check_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
else:
eps = 1e-3
self.check_backward_options['eps'] = eps
self.check_double_backward_options['eps'] = eps
def generate_inputs(self):
x1_shape, x2_shape, y_shape = self.shape
x1 = numpy.random.uniform(-1, 1, x1_shape).astype(self.dtype)
x2 = numpy.random.uniform(-1, 1, x2_shape).astype(self.dtype)
return x1, x2
def forward(self, inputs, devices):
x1, x2 = inputs
return functions.maximum(x1, x2),
def forward_expected(self, inputs):
x1, x2 = inputs
expected = numpy.maximum(x1, x2)
expected = numpy.asarray(expected)
return expected,
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
class TestMaximumInconsistentShapes(unittest.TestCase):
def test_maximum_inconsistent_shapes(self):
x1_data = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
x2_data = numpy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
with self.assertRaises(type_check.InvalidType):
functions.maximum(x1, x2)
testing.run_module(__name__, __file__)
| 2,553 | 28.022727 | 75 | py |
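The shape triples in the parameterization above exercise broadcasting; forward_expected relies on numpy.maximum broadcasting the same way F.maximum is expected to. A quick NumPy-only illustration (hypothetical values):

import numpy

x1 = numpy.zeros((2,), dtype=numpy.float32)
x2 = numpy.ones((3, 2), dtype=numpy.float32)
y = numpy.maximum(x1, x2)  # (2,) broadcasts against (3, 2)
print(y.shape)             # (3, 2), matching the third entry of that shape triple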
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_log_ndtr.py |
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _log_ndtr_cpu(x, dtype):
from scipy import special
return special.log_ndtr(x).astype(dtype)
def _log_ndtr_gpu(x, dtype):
return cuda.to_gpu(_log_ndtr_cpu(cuda.to_cpu(x), dtype))
def _log_ndtr_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _log_ndtr_cpu(x, dtype)
else:
return _log_ndtr_gpu(x, dtype)
@testing.unary_math_function_unittest(
F.log_ndtr,
func_expected=_log_ndtr_expected,
)
@testing.with_requires('scipy')
class TestLogNdtr(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 735 | 18.891892 | 60 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_cumsum.py |
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import force_array
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
{'shape': (2, 3, 4), 'axis': None},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestCumsum(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-2})
self.check_backward_options.update({'atol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-1, 'eps': 0.01})
elif self.dtype == numpy.float32:
self.check_double_backward_options.update({'atol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.cumsum(x, axis=self.axis),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.cumsum(x, axis=self.axis)
expected = force_array(expected)
return expected,
@testing.parameterize(
{'axis': 3},
{'axis': -4},
)
class TestCumsumInvalidTypeAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.cumsum(x, self.axis)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestCumsumInvalidTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def test_invalid_type_axis(self):
with self.assertRaises(TypeError):
functions.cumsum(self.x, [0])
with self.assertRaises(TypeError):
functions.cumsum(self.x, (0,))
testing.run_module(__name__, __file__)
| 2,858 | 25.472222 | 70 | py |
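forward_expected above delegates to numpy.cumsum; note that with axis=None NumPy flattens the input before accumulating, which the axis parameterization covers. A tiny illustration (values are hypothetical):

import numpy

x = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
print(numpy.cumsum(x, axis=1))     # cumulative sums along each row
print(numpy.cumsum(x, axis=None))  # flattened: [ 0.  1.  3.  6. 10. 15.]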
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_sum.py |
import unittest
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'axis': [None, 0, 1, 2, -1, (0, 1), (1, 0), (0, -1), (-2, 0)],
'keepdims': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestSum(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options \
.update({'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.sum(x, axis=self.axis, keepdims=self.keepdims),
def forward_expected(self, inputs):
x, = inputs
expected = x.sum(axis=self.axis, keepdims=self.keepdims)
expected = numpy.asarray(expected)
return expected,
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSumError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
functions.sum(self.x, axis=[0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
functions.sum(self.x, axis=(1, 'x'))
def test_duplicate_axis(self):
with self.assertRaises(ValueError):
functions.sum(self.x, axis=(0, 0))
def test_pos_neg_duplicate_axis(self):
with self.assertRaises(ValueError):
self.x.sum(axis=(1, -2))
testing.run_module(__name__, __file__)
| 2,332 | 27.45122 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_arctanh.py |
import unittest
import numpy
import chainer.functions as F
from chainer import testing
def make_data(shape, dtype):
# Input values close to -1 or 1 would make tests unstable
x = numpy.random.uniform(-0.9, 0.9, shape).astype(dtype, copy=False)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype, copy=False)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype, copy=False)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.arctanh,
make_data=make_data,
backward_options={'eps': 1e-3},
double_backward_options={'eps': 1e-3},
)
class TestArctanh(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 668 | 22.892857 | 72 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_ndtri.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _ndtri_cpu(x, dtype):
from scipy import special
return numpy.vectorize(special.ndtri, otypes=[dtype])(x)
def _ndtri_gpu(x, dtype):
return cuda.to_gpu(_ndtri_cpu(cuda.to_cpu(x), dtype))
def _ndtri_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _ndtri_cpu(x, dtype)
else:
return _ndtri_gpu(x, dtype)
def make_data(shape, dtype):
x = numpy.random.uniform(0.1, 0.9, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.ndtri,
func_expected=_ndtri_expected,
make_data=make_data,
forward_options={'atol': 1e-3, 'rtol': 1e-3},
backward_options={'eps': 1e-6},
double_backward_options={'eps': 1e-6}
)
@testing.with_requires('scipy')
class TestNdtri(unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.without_requires('scipy')
class TestNdtriExceptions(unittest.TestCase):
def setUp(self):
self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
self.func = F.ndtri
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with self.assertRaises(ImportError):
self.func(x)
def test_forward_cpu(self):
self.check_forward(self.x)
testing.run_module(__name__, __file__)
| 1,669 | 23.558824 | 69 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_scale.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
class TestScale(testing.FunctionTestCase):
def setUp(self):
self.axis = 1
def generate_inputs(self):
x1 = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
x2 = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
return x1, x2
def forward_expected(self, inputs):
x1, x2 = inputs
y_expected = numpy.copy(x1)
for i, j, k in numpy.ndindex(y_expected.shape):
y_expected[i, j, k] *= x2[j]
return y_expected,
def forward(self, inputs, device):
x1, x2 = inputs
y = functions.scale(x1, x2, self.axis)
return y,
class TestScaleInvalidShape(unittest.TestCase):
def test_scale_invalid_shape(self):
x1 = chainer.Variable(numpy.zeros((3, 2, 3), numpy.float32))
x2 = chainer.Variable(numpy.zeros((2), numpy.float32))
axis = 0
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
functions.scale(x1, x2, axis)
testing.run_module(__name__, __file__)
| 1,505 | 23.290323 | 73 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_fmod.py |
import math
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFmod(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options.update({'atol': 1e-7, 'rtol': 1e-7})
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options.update(
{'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1.0, 1.0, self.shape).astype(self.dtype)
divisor = numpy.random.uniform(-1.0, 1.0,
self.shape).astype(self.dtype)
# division with too small divisor is unstable.
for i in numpy.ndindex(self.shape):
if math.fabs(divisor[i]) < 0.1:
divisor[i] += 1.0
# make enough margin
for i in numpy.ndindex(self.shape):
m = math.fabs(x[i] % divisor[i])
if m < 0.01 or m > (divisor[i] - 0.01):
x[i] = 0.5
divisor[i] = 0.3
return x, divisor
def forward(self, inputs, device):
x, divisor = inputs
y = functions.fmod(x, divisor)
return y,
def forward_expected(self, inputs):
x, divisor = inputs
expected = numpy.fmod(x, divisor)
expected = numpy.asarray(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,882 | 26.691176 | 74 | py |
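generate_inputs above nudges divisors away from zero and keeps |x mod divisor| away from the jump points of fmod, because the numerical gradient check perturbs inputs by eps and fmod is discontinuous at multiples of the divisor. A small NumPy sketch of that jump (values are hypothetical):

import numpy

d = 0.3
xs = numpy.array([0.2999, 0.3001], dtype=numpy.float64)
print(numpy.fmod(xs, d))  # ~0.2999 vs ~0.0001: a jump that breaks finite differences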
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_fft.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(4,), (2, 3), (2, 3, 2)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'method': ['fft', 'ifft']
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFFT(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options.update({
'eps': 2.0 ** -2, 'atol': 1e-2, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-3})
def generate_inputs(self):
rx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
ix = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return rx, ix
def forward(self, inputs, device):
rx, ix = inputs
ry, iy = getattr(functions, self.method)((rx, ix))
return ry, iy
def forward_expected(self, inputs):
rx, ix = inputs
expected = getattr(numpy.fft, self.method)(rx + ix * 1j)
return (
expected.real.astype(self.dtype),
expected.imag.astype(self.dtype)
)
testing.run_module(__name__, __file__)
| 1,497 | 24.827586 | 71 | py |
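F.fft and F.ifft take and return (real, imag) pairs, whereas numpy.fft works on complex arrays, so forward_expected packs rx + 1j * ix and then splits .real/.imag. A NumPy-only sketch of that packing (shapes and values are illustrative):

import numpy

rx = numpy.random.uniform(-1, 1, (4,)).astype(numpy.float32)
ix = numpy.random.uniform(-1, 1, (4,)).astype(numpy.float32)
y = numpy.fft.fft(rx + ix * 1j)  # complex reference
ry = y.real.astype(numpy.float32)
iy = y.imag.astype(numpy.float32)
print(ry, iy)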
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_clip.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'x_min_max': [
(-0.75, 1.53),
(numpy.float32(-0.75), numpy.float32(1.53)),
(-1, 2),
(None, 2),
(-1, None),
(None, numpy.float32(1.53)),
(numpy.float32(-0.75), None),
]
}))
class TestClip(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-3, 3, self.shape).astype(self.dtype)
# Avoid values around x_min and x_max for stability of numerical
# gradient
x_min, x_max = self.x_min_max
x_min = float(x_min) if x_min is not None else self.x.min()
x_max = float(x_max) if x_max is not None else self.x.max()
eps = 0.01
for ind in numpy.ndindex(self.x.shape):
if x_min - eps < self.x[ind] < x_min + eps:
self.x[ind] = -0.5
elif x_max - eps < self.x[ind] < x_max + eps:
self.x[ind] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
def check_forward(self, x_data):
x_min, x_max = self.x_min_max
x = chainer.Variable(x_data)
y = functions.clip(x, x_min, x_max)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = self.x.copy()
for i in numpy.ndindex(self.x.shape):
if (x_min is not None) and (self.x[i] < x_min):
y_expect[i] = x_min
elif (x_max is not None) and (self.x[i] > x_max):
y_expect[i] = x_max
testing.assert_allclose(y_expect, y.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
def f(x):
x_min, x_max = self.x_min_max
return functions.clip(x, x_min, x_max)
gradient_check.check_backward(
f, x_data, y_grad, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, gx_grad):
def f(x):
x_min, x_max = self.x_min_max
return functions.clip(x, x_min, x_max)
gradient_check.check_double_backward(
f, x_data, y_grad, gx_grad, dtype=numpy.float64, atol=1e-3)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
class TestClipInvalidInterval(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
def test_invalid_interval(self):
with self.assertRaises(ValueError):
functions.clip(self.x, 1.0, -1.0)
def test_max_min_none(self):
with self.assertRaises(ValueError):
functions.clip(self.x, None, None)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestClipBorderGrad(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(6, dtype=self.dtype)
self.x_min = 1.0
self.x_max = 4.0
self.expected = numpy.asarray([0, 1, 1, 1, 1, 0], dtype=self.dtype)
def check_border_grad(self, x, expected):
x = chainer.Variable(x)
y = functions.clip(x, self.x_min, self.x_max)
l = functions.sum(y)
l.backward()
testing.assert_allclose(x.grad, expected, atol=0, rtol=0)
def test_border_grad_cpu(self):
self.check_border_grad(self.x, self.expected)
@attr.gpu
def test_border_grad_gpu(self):
self.check_border_grad(cuda.to_gpu(self.x), cuda.to_gpu(self.expected))
testing.run_module(__name__, __file__)
| 4,350 | 30.302158 | 79 | py |
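TestClipBorderGrad above pins down the gradient convention at the clip boundaries: for x = arange(6) with x_min = 1 and x_max = 4, the gradient of sum(clip(x)) is expected to be 1 inside the interval, including at the border values themselves, and 0 outside. A NumPy expression that reproduces the test's expected array:

import numpy

x = numpy.arange(6, dtype=numpy.float32)
grad = ((x >= 1.0) & (x <= 4.0)).astype(numpy.float32)
print(grad)  # [0. 1. 1. 1. 1. 0.], the `expected` array used by the test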
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_erfinv.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _erfinv_cpu(x, dtype):
from scipy import special
return numpy.vectorize(special.erfinv, otypes=[dtype])(x)
def _erfinv_gpu(x, dtype):
return cuda.to_gpu(_erfinv_cpu(cuda.to_cpu(x), dtype))
def _erfinv_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _erfinv_cpu(x, dtype)
else:
return _erfinv_gpu(x, dtype)
def make_data(shape, dtype):
x = numpy.random.uniform(-0.9, 0.9, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.erfinv,
func_expected=_erfinv_expected,
make_data=make_data,
forward_options={'atol': 1e-3, 'rtol': 1e-3},
backward_options={'eps': 1e-6},
double_backward_options={'eps': 1e-6}
)
@testing.with_requires('scipy')
class TestErfinv(unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.without_requires('scipy')
class TestErfinvExceptions(unittest.TestCase):
def setUp(self):
self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
self.func = F.erfinv
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with self.assertRaises(ImportError):
self.func(x)
def test_forward_cpu(self):
self.check_forward(self.x)
testing.run_module(__name__, __file__)
| 1,682 | 23.75 | 69 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_cumprod.py |
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import force_array
from chainer.utils import type_check
@testing.parameterize(*(testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
{'shape': (2, 3, 4), 'axis': None},
],
testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contain_zero': [True, False],
}),
) + testing.product({
'shape': [(0, 3)],
'axis': [-2, 1, None],
'dtype': [numpy.float64],
'contain_zero': [False],
})))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestCumprod(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-1, 'rtol': 1e-1, 'eps': 0.01})
elif self.dtype == numpy.float32:
self.check_double_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-2, 2, self.shape).astype(self.dtype)
if self.contain_zero:
index = numpy.random.choice(x.size)
x.ravel()[index] = 0
return x,
def forward(self, inputs, device):
x, = inputs
return functions.cumprod(x, axis=self.axis),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.cumprod(x, axis=self.axis)
expected = force_array(expected)
return expected,
@testing.parameterize(
{'axis': 3},
{'axis': -4},
)
class TestCumprodInvalidTypeAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.cumprod(x, self.axis)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestCumprodInvalidTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def test_invalid_type_axis(self):
with self.assertRaises(TypeError):
functions.cumprod(self.x, [0])
with self.assertRaises(TypeError):
functions.cumprod(self.x, (0,))
testing.run_module(__name__, __file__)
| 3,202 | 26.612069 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_erfcx.py |
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _erfcx_cpu(x, dtype):
from scipy import special
return special.erfcx(x).astype(dtype)
def _erfcx_gpu(x, dtype):
return cuda.to_gpu(_erfcx_cpu(cuda.to_cpu(x), dtype))
def _erfcx_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _erfcx_cpu(x, dtype)
else:
return _erfcx_gpu(x, dtype)
@testing.unary_math_function_unittest(
F.erfcx,
func_expected=_erfcx_expected,
)
@testing.with_requires('scipy')
class TestErfcx(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 706 | 18.108108 | 57 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_einsum.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer.functions.math import einsum
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils
def _tuple_to_gpu(xs):
return tuple(cuda.to_gpu(x) for x in xs)
def _from_str_subscript(subscript):
# subscript should be lower case (a-z)
return [
(Ellipsis if char == '@' else ord(char) - ord('a'))
for char in subscript.replace('...', '@')
]
_np_einsum_float16_bug = numpy.lib.NumpyVersion(numpy.__version__) < '1.15.0'
def _skip_float16_bug():
if _np_einsum_float16_bug:
raise unittest.SkipTest(
'float16 is not supported. See numpy issue #10899.')
@testing.parameterize(*testing.product_dict(
[
{'subscripts': 'ij,jk->ik', 'shapes': ((2, 3), (3, 4))},
{'subscripts': ',ij->i', 'shapes': ((), (3, 4),)},
{'subscripts': 'kj,ji->ik', 'shapes': ((2, 3), (3, 4))},
{'subscripts': 'ij,jk,kl->il', 'shapes': ((5, 2), (2, 3), (3, 4))},
{'subscripts': 'ij,ij->i', 'shapes': ((2, 3), (2, 3))},
{'subscripts': 'ij,jk', 'shapes': ((2, 3), (3, 4))},
{'subscripts': 'i->', 'shapes': ((3,),)},
{'subscripts': 'ii', 'shapes': ((2, 2),)},
{'subscripts': 'ii->i', 'shapes': ((2, 2),)},
{'subscripts': 'j,j', 'shapes': ((3,), (3))},
{'subscripts': 'j,ij', 'shapes': ((3,), (2, 3))},
{'subscripts': 'j,iij', 'shapes': ((3,), (2, 2, 3))},
{'subscripts': 'iij,kkj', 'shapes': ((2, 2, 3), (4, 4, 3))},
{'subscripts': '...ij,...jk->...ik',
'shapes': ((2, 1, 2, 3), (2, 1, 3, 4))},
{'subscripts': 'i...j,jk...->k...i', 'shapes': ((4, 2, 3), (3, 5, 2))},
{'subscripts': 'ii...,...jj', 'shapes': ((2, 2, 4), (4, 3, 3))},
{'subscripts': '...i,i', 'shapes': ((2, 2, 3), (3,))},
{'subscripts': 'i...,i->...i', 'shapes': ((3, 2, 2), (3,))},
{'subscripts': 'i,ji,i', 'shapes': ((3,), (2, 3), (3,))},
{'subscripts': 'i,i,i->i', 'shapes': ((3,), (3,), (3,))},
],
testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'subscript_type': ['str', 'int'],
}),
))
class TestEinSum(unittest.TestCase):
def setUp(self):
self.inputs = tuple([
self._setup_tensor(-1, 1, shape, self.dtype)
for shape in self.shapes
])
if self.dtype == numpy.float16:
# Avoid numpy issue #10899
self.forward_answer = numpy.einsum(
*self._get_args(self.inputs),
dtype=numpy.float64
).astype(self.dtype)
else:
self.forward_answer = numpy.einsum(*self._get_args(self.inputs))
self.g = self._setup_tensor(
-1, 1, self.forward_answer.shape, self.dtype)
self.gg_inputs = tuple([
self._setup_tensor(-1, 1, shape, self.dtype)
for shape in self.shapes
])
self.op = lambda *xs: einsum.einsum(*self._get_args(xs))
def _get_args(self, xs):
if self.subscript_type == 'str':
return (self.subscripts,) + xs
else:
args = []
subscripts = self.subscripts.split('->')
for in_subscript, x in zip(subscripts[0].split(','), xs):
args.extend([x, _from_str_subscript(in_subscript)])
if len(subscripts) == 2:
args.append(_from_str_subscript(subscripts[1]))
return tuple(args)
def _setup_tensor(self, _min, _max, shape, dtype):
return numpy.random.uniform(_min, _max, shape).astype(dtype)
def check_forward(self, inputs_data, atol=1e-4, rtol=1e-5):
out = self.op(*[chainer.Variable(x) for x in inputs_data])
testing.assert_allclose(self.forward_answer, out.data, atol, rtol)
def test_einsum_forward_cpu(self):
if self.dtype == numpy.float16:
_skip_float16_bug()
self.check_forward(self.inputs, atol=5e-3, rtol=1e-3)
else:
self.check_forward(self.inputs)
@attr.gpu
def test_einsum_forward_gpu(self):
inputs = _tuple_to_gpu(self.inputs)
if self.dtype == numpy.float16:
self.check_forward(inputs, atol=5e-3, rtol=1e-3)
else:
self.check_forward(inputs)
def check_backward(self, inputs_data, output_grad, atol, rtol):
gradient_check.check_backward(
self.op, inputs_data, output_grad, atol=atol, rtol=rtol,
dtype=numpy.float64)
def test_einsum_backward_cpu(self):
if self.dtype == numpy.float16:
_skip_float16_bug()
self.check_backward(self.inputs, self.g, atol=1e-2, rtol=5e-2)
@attr.gpu
def test_einsum_backward_gpu(self):
self.check_backward(
_tuple_to_gpu(self.inputs),
cuda.to_gpu(self.g), atol=1e-2, rtol=5e-2)
def check_double_backward(
self, inputs_data, y_grad, inputs_grad_grad,
atol, rtol):
gradient_check.check_double_backward(
self.op, inputs_data, y_grad, inputs_grad_grad,
atol=atol, rtol=rtol, dtype=numpy.float64)
def test_einsum_double_backward_cpu(self):
if self.dtype == numpy.float16:
_skip_float16_bug()
self.check_double_backward(
self.inputs, self.g, self.gg_inputs,
atol=1e-2, rtol=5e-2)
@attr.gpu
def test_einsum_double_backward_gpu(self):
self.check_double_backward(
_tuple_to_gpu(self.inputs), cuda.to_gpu(self.g),
_tuple_to_gpu(self.gg_inputs), atol=1e-2, rtol=1e-2)
@testing.parameterize(
# mismatch: 'i'
{'subscripts': 'i,i', 'shapes': ((2,), (3,))},
{'subscripts': 'i,i->i', 'shapes': ((2,), (3,))},
{'subscripts': 'ii', 'shapes': ((2, 3),)},
# mismatch: '...'
{'subscripts': '...i,...i', 'shapes': ((2, 2), (3, 2))},
{'subscripts': '...i,...j', 'shapes': ((2, 3), (3, 2))},
{'subscripts': '...i,j...', 'shapes': ((2, 3), (2, 3))},
{'subscripts': 'i...,j...', 'shapes': ((2, 3), (3, 2))},
# F.einsum does not allow broadcasting
{'subscripts': '...i,...i', 'shapes': ((2, 2), (1, 2))},
{'subscripts': '...i,...i', 'shapes': ((2,), (1, 2))},
)
class TestEinSumInvalid(unittest.TestCase):
def setUp(self):
self.inputs = tuple([
numpy.zeros(shape, numpy.float32)
for shape in self.shapes
])
def test_raise_invalid_type(self):
with self.assertRaises(utils.type_check.InvalidType):
einsum.einsum(self.subscripts, *self.inputs)
@testing.parameterize(
{'subscripts': 'i,i', 'shapes': ((2,), (2,), (2,))},
{'subscripts': 'i,i', 'shapes': ((2,),)},
{'subscripts': 'i,i->j', 'shapes': ((2,), (2,))},
{'subscripts': 'i,i->...', 'shapes': ((2,), (2,))},
)
class TestEinSumParseError(unittest.TestCase):
def setUp(self):
self.inputs = tuple([
numpy.zeros(shape, numpy.float32)
for shape in self.shapes
])
def test_raise_parse_error(self):
with self.assertRaises(ValueError):
einsum.einsum(self.subscripts, *self.inputs)
@testing.parameterize(
{'subscripts': '...->', 'shapes': ((2,),)},
{'subscripts': 'j...i->ij', 'shapes': ((2, 1, 3),)},
{'subscripts': 'i,...i->', 'shapes': ((2,), (3, 2))},
)
class TestEinSumUndefinedSemantics(unittest.TestCase):
def setUp(self):
self.inputs = tuple([
numpy.zeros(shape, numpy.float32)
for shape in self.shapes
])
def test_bad_ellipsis_sum(self):
with self.assertRaises(ValueError):
einsum.einsum(self.subscripts, *self.inputs)
def diag_einsum(
input_subscripts, output_subscript, *ioperands, **kwargs):
output_shape, = utils.argument.parse_kwargs(kwargs, ('output_shape', None))
return einsum.DiagEinSum(
in_subs=input_subscripts,
out_sub=output_subscript,
out_shape=output_shape,
).apply(ioperands)[0]
@testing.parameterize(*testing.product_dict(
[
{'subscripts': 'i->ij', 'i_shapes': ((3,),), 'o_shape': (3, 4)},
{'subscripts': '->i', 'i_shapes': ((),), 'o_shape': (3,)},
{'subscripts': ',i->ij', 'i_shapes': ((), (2,),), 'o_shape': (2, 3)},
{'subscripts': ',ij->i', 'i_shapes': ((), (3, 4),), 'o_shape': (3,)},
],
[
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
class TestDiagEinSum(unittest.TestCase):
def setUp(self):
self.inputs = [
self._setup_tensor(-1, 1, shape, self.dtype)
for shape in self.i_shapes
]
self.g = self._setup_tensor(-1, 1, self.o_shape, self.dtype)
self.gg_inputs = [
self._setup_tensor(-1, 1, shape, self.dtype)
for shape in self.i_shapes
]
i_sub, o_sub = self.subscripts.split('->')
self.op = lambda *xs: diag_einsum(
i_sub, o_sub, *xs, output_shape=self.o_shape)
def _setup_tensor(self, _min, _max, shape, dtype):
return numpy.random.uniform(_min, _max, shape).astype(dtype)
# TODO(kataoka): test forward
def check_backward(self, inputs_data, output_grad, atol, rtol):
gradient_check.check_backward(
self.op, inputs_data, output_grad, atol=atol, rtol=rtol,
dtype=numpy.float64)
def test_einsum_backward_cpu(self):
self.check_backward(self.inputs, self.g, atol=1e-2, rtol=5e-2)
@attr.gpu
def test_einsum_backward_gpu(self):
self.check_backward(
_tuple_to_gpu(self.inputs),
cuda.to_gpu(self.g), atol=1e-2, rtol=5e-2)
def check_double_backward(
self, inputs_data, y_grad, inputs_grad_grad,
atol, rtol):
gradient_check.check_double_backward(
self.op, inputs_data, y_grad, inputs_grad_grad,
atol=atol, rtol=rtol, dtype=numpy.float64)
def test_einsum_double_backward_cpu(self):
self.check_double_backward(
self.inputs, self.g, self.gg_inputs,
atol=1e-2, rtol=5e-2)
@attr.gpu
def test_einsum_double_backward_gpu(self):
self.check_double_backward(
_tuple_to_gpu(self.inputs), cuda.to_gpu(self.g),
_tuple_to_gpu(self.gg_inputs), atol=1e-2, rtol=1e-2)
testing.run_module(__name__, __file__)
| 10,446 | 33.707641 | 79 | py |
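When subscript_type is 'int', _get_args above rebuilds numpy.einsum's alternative calling convention, interleaving each operand with a list of integer axis labels (with Ellipsis standing in for '...'). The two spellings below are equivalent; the shapes come from the first parameter entry:

import numpy

a = numpy.ones((2, 3))
b = numpy.ones((3, 4))
y1 = numpy.einsum('ij,jk->ik', a, b)
y2 = numpy.einsum(a, [0, 1], b, [1, 2], [0, 2])  # integer-label form
print(numpy.allclose(y1, y2))  # True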
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_logsumexp.py |
import unittest
import numpy
from chainer import functions
from chainer import testing
from chainer.utils import force_array
@testing.parameterize(
*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(), (3, 2, 4)],
'axis': [None, 0, 1, 2, -1, (0, 1), (1, 0), (0, -1), (-2, 0)]
})
)
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestLogSumExp(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'eps': 2.0 ** -3, 'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'eps': 2.0 ** -3, 'rtol': 1e-1, 'atol': 1e-1})
else:
self.check_backward_options.update({
'eps': 2.0 ** -5, 'rtol': 1e-4, 'atol': 1e-4})
self.check_double_backward_options.update({
'eps': 2.0 ** -5, 'rtol': 1e-4, 'atol': 1e-4})
def before_test(self, test_name):
if self.axis is not None and self.shape == ():
raise unittest.SkipTest('Axis must be None on 0-dim input.')
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.logsumexp(x, axis=self.axis),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.log(numpy.exp(x).sum(axis=self.axis))
expected = force_array(expected)
return expected,
class TestLogSumExpInvalidAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
functions.logsumexp(self.x, [0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
functions.logsumexp(self.x, (1, 'x'))
def test_duplicate_axis(self):
with self.assertRaises(ValueError):
functions.logsumexp(self.x, (0, 0))
def test_pos_neg_duplicate_axis(self):
with self.assertRaises(ValueError):
functions.logsumexp(self.x, (1, -2))
testing.run_module(__name__, __file__)
| 2,689 | 28.23913 | 77 | py |
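forward_expected above uses the naive identity log(sum(exp(x))); on these small, well-scaled inputs the naive form is numerically safe, which is what F.logsumexp is checked against. A NumPy-only sketch of that reference (shapes are illustrative):

import numpy

x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(numpy.float32)
ref = numpy.log(numpy.exp(x).sum(axis=(0, 1)))  # same reference as forward_expected
print(ref.shape)                                # (4,)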
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_det.py |
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'batched': [True, False],
}))
class DetFunctionTest(unittest.TestCase):
def setUp(self):
if self.batched:
while True:
x = numpy.random.uniform(
.5, 1, (6, 3, 3)).astype(numpy.float32)
# Avoid backward/double_backward instability.
if not numpy.any(numpy.isclose(
numpy.linalg.det(x), 0, atol=1e-2, rtol=1e-2)):
self.x = x.astype(self.dtype, copy=False)
break
self.y = numpy.random.uniform(
.5, 1, (6, 3, 3)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (6,)).astype(self.dtype)
self.ggx = numpy.random.uniform(
.5, 1, (6, 3, 3)).astype(self.dtype)
self.ct = self.x.transpose(0, 2, 1)
self.det = F.batch_det
self.matmul = F.matmul
else:
while True:
x = numpy.random.uniform(.5, 1, (5, 5)).astype(numpy.float32)
if not numpy.isclose(
numpy.linalg.det(x), 0, atol=1e-2, rtol=1e-2):
self.x = x.astype(self.dtype, copy=False)
break
self.y = numpy.random.uniform(.5, 1, (5, 5)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, ()).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, (5, 5)).astype(self.dtype)
self.ct = self.x.transpose()
self.det = F.det
self.matmul = F.matmul
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 1e-3}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_forward_options = {}
self.check_backward_options = {'atol': 5e-3, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def det_transpose(self, gpu=False):
if gpu:
cx = cuda.to_gpu(self.x)
ct = cuda.to_gpu(self.ct)
else:
cx = self.x
ct = self.ct
xn = chainer.Variable(cx)
xt = chainer.Variable(ct)
yn = self.det(xn)
yt = self.det(xt)
testing.assert_allclose(yn.data, yt.data, rtol=1e-4, atol=1)
@attr.gpu
def test_det_transpose_gpu(self):
self.det_transpose(gpu=True)
def test_det_transpose_cpu(self):
self.det_transpose(gpu=False)
def det_scaling(self, gpu=False):
scaling = numpy.random.randn(1).astype(self.dtype)
if gpu:
cx = cuda.to_gpu(self.x)
sx = cuda.to_gpu(scaling * self.x)
else:
cx = self.x
sx = scaling * self.x
c = float(scaling ** self.x.shape[1])
cxv = chainer.Variable(cx)
sxv = chainer.Variable(sx)
cxd = self.det(cxv)
sxd = self.det(sxv)
testing.assert_allclose(
cxd.data * c, sxd.data, **self.check_forward_options)
@attr.gpu
def test_det_scaling_gpu(self):
self.det_scaling(gpu=True)
def test_det_scaling_cpu(self):
self.det_scaling(gpu=False)
def det_identity(self, gpu=False):
if self.batched:
chk = numpy.ones(len(self.x), dtype=self.dtype)
dt = numpy.identity(self.x.shape[1], dtype=self.dtype)
idt = numpy.repeat(dt[None], len(self.x), axis=0)
else:
idt = numpy.identity(self.x.shape[1], dtype=self.dtype)
chk = numpy.ones(1, dtype=self.dtype)
if gpu:
chk = cuda.to_gpu(chk)
idt = cuda.to_gpu(idt)
idtv = chainer.Variable(idt)
idtd = self.det(idtv)
testing.assert_allclose(idtd.data, chk, **self.check_forward_options)
@attr.gpu
def test_det_identity_gpu(self):
self.det_identity(gpu=True)
def test_det_identity_cpu(self):
self.det_identity(gpu=False)
def det_product(self, gpu=False):
if gpu:
cx = cuda.to_gpu(self.x)
cy = cuda.to_gpu(self.y)
else:
cx = self.x
cy = self.y
vx = chainer.Variable(cx)
vy = chainer.Variable(cy)
dxy1 = self.det(self.matmul(vx, vy))
dxy2 = self.det(vx) * self.det(vy)
testing.assert_allclose(
dxy1.data, dxy2.data, **self.check_forward_options)
def test_det_product_cpu(self):
self.det_product(gpu=False)
@attr.gpu
def test_det_product_gpu(self):
self.det_product(gpu=True)
@attr.gpu
def test_batch_backward_gpu(self):
x_data = cuda.to_gpu(self.x)
y_grad = cuda.to_gpu(self.gy)
gradient_check.check_backward(
self.det, x_data, y_grad, **self.check_backward_options)
def test_batch_backward_cpu(self):
x_data, y_grad = self.x, self.gy
gradient_check.check_backward(
self.det, x_data, y_grad, **self.check_backward_options)
@attr.gpu
def test_batch_double_backward_gpu(self):
x_data = cuda.to_gpu(self.x)
y_grad = cuda.to_gpu(self.gy)
x_grad_grad = cuda.to_gpu(self.ggx)
gradient_check.check_double_backward(
self.det, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def test_batch_double_backward_cpu(self):
x_data, y_grad, x_grad_grad = self.x, self.gy, self.ggx
gradient_check.check_double_backward(
self.det, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def check_single_matrix(self, x):
x = chainer.Variable(x)
y = self.det(x)
if self.batched:
self.assertEqual(y.data.ndim, 1)
else:
self.assertEqual(y.data.ndim, 0)
def test_single_matrix_cpu(self):
self.check_single_matrix(self.x)
@attr.gpu
def test_expect_scalar_gpu(self):
self.check_single_matrix(cuda.to_gpu(self.x))
def check_singular_matrix(self, x):
if self.batched:
x[0, ...] = 0.0
else:
x[...] = 0.0
x = chainer.Variable(x)
# it checks no errors are raised
self.det(x)
def test_singular_matrix_cpu(self):
self.check_singular_matrix(self.x)
@attr.gpu
def test_singular_matrix_gpu(self):
self.check_singular_matrix(cuda.to_gpu(self.x))
def check_zero_det(self, x, gy, err):
if self.batched:
x[0, ...] = 0.0
else:
x[...] = 0.0
with self.assertRaises(err):
gradient_check.check_backward(
self.det, x, gy,
**self.check_backward_options)
def test_zero_det_cpu(self):
self.check_zero_det(self.x, self.gy, ValueError)
@attr.gpu
def test_zero_det_gpu(self):
with chainer.using_config('debug', True):
self.check_zero_det(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), ValueError)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestDetSmallCase(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, (2, 2)).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 5e-3, 'rtol': 5e-3}
else:
self.check_forward_options = {}
def check_by_definition(self, x):
ans = F.det(chainer.Variable(x)).data
y = x[0, 0] * x[1, 1] - x[0, 1] * x[1, 0]
testing.assert_allclose(ans, y, **self.check_forward_options)
def test_answer_cpu(self):
self.check_by_definition(self.x)
@attr.gpu
def test_answer_gpu(self):
self.check_by_definition(cuda.to_gpu(self.x))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(s, s) for s in six.moves.range(1, 5)],
}))
class TestDetGPUCPUConsistency(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
@attr.gpu
def test_answer_gpu_cpu(self):
x = cuda.to_gpu(self.x)
y = F.det(chainer.Variable(x))
gpu = cuda.to_cpu(y.data)
if self.dtype == numpy.float16:
cpu = numpy.linalg.det(
self.x.astype(numpy.float32)).astype(numpy.float16)
testing.assert_allclose(gpu, cpu, atol=5e-3, rtol=5e-3)
else:
cpu = numpy.linalg.det(self.x)
testing.assert_allclose(gpu, cpu)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(w, s, s) for s in six.moves.range(1, 5)
for w in six.moves.range(1, 5)],
}))
class TestBatchDetGPUCPUConsistency(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
@attr.gpu
def test_answer_gpu_cpu(self):
x = cuda.to_gpu(self.x)
y = F.batch_det(chainer.Variable(x))
gpu = cuda.to_cpu(y.data)
if self.dtype == numpy.float16:
cpu = numpy.linalg.det(
self.x.astype(numpy.float32)).astype(numpy.float16)
testing.assert_allclose(gpu, cpu, atol=5e-3, rtol=5e-3)
else:
cpu = numpy.linalg.det(self.x)
testing.assert_allclose(gpu, cpu)
class DetFunctionRaiseTest(unittest.TestCase):
def test_invalid_ndim(self):
with self.assertRaises(type_check.InvalidType):
F.batch_det(chainer.Variable(numpy.zeros((2, 2))))
def test_invalid_shape(self):
with self.assertRaises(type_check.InvalidType):
F.batch_det(chainer.Variable(numpy.zeros((1, 2))))
testing.run_module(__name__, __file__)
| 10,333 | 32.121795 | 77 | py |
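The determinant identities the test above exercises (transpose invariance, scaling by c**n, multiplicativity under matrix product) also hold for the NumPy reference. A quick NumPy-only check with hypothetical well-conditioned 3x3 matrices:

import numpy

a = numpy.random.uniform(.5, 1, (3, 3))
b = numpy.random.uniform(.5, 1, (3, 3))
c = 2.0
det = numpy.linalg.det
print(numpy.isclose(det(a.T), det(a)))                    # det(A^T) == det(A)
print(numpy.isclose(det(c * a), c ** 3 * det(a)))         # det(cA) == c**n * det(A)
print(numpy.isclose(det(numpy.dot(a, b)), det(a) * det(b)))  # det(AB) == det(A)det(B)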
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_prod.py |
import unittest
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*(testing.product({
'axis': [None, 0, 1, 2, -1, (0, 1), (1, 0), (0, -1), (-2, 0)],
'keepdims': [True, False],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contain_zero': [True, False],
'shape': [(3, 2, 4)],
}) + testing.product({
'axis': [None, 0, 1, 2, (0, 1), (0, -1)],
'keepdims': [True, False],
'dtype': [numpy.float32],
'contain_zero': [False],
'shape': [(3, 1, 0)],
})))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestProd(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({'atol': 1e-3,
'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
if self.contain_zero:
index = numpy.random.choice(x.size)
x.ravel()[index] = 0
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.prod(x, axis=self.axis, keepdims=self.keepdims)
return y,
def forward_expected(self, inputs):
x, = inputs
expected = x.prod(axis=self.axis, keepdims=self.keepdims)
expected = numpy.asarray(expected)
return expected,
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestProdError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 4)).astype(self.dtype)
def test_invalid_axis_type(self):
with self.assertRaises(TypeError):
functions.prod(self.x, axis=[0])
def test_invalid_axis_type_in_tuple(self):
with self.assertRaises(TypeError):
functions.prod(self.x, axis=(1, 'x'))
def test_duplicate_axis(self):
with self.assertRaises(ValueError):
functions.prod(self.x, axis=(0, 0))
def test_pos_neg_duplicate_axis(self):
with self.assertRaises(ValueError):
self.x.prod(axis=(1, -2))
testing.run_module(__name__, __file__)
| 2,750 | 28.265957 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_exponential.py |
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestExp(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-7, 'rtol': 1e-7}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 3e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.exp(x),
def forward_expected(self, inputs):
x, = inputs
expected = numpy.exp(x)
expected = utils.force_array(expected)
return expected,
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'function_name': ['log', 'log2', 'log10'],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestLog(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-7, 'rtol': 1e-7}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 3e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 3e-3, 'rtol': 1e-2}
else:
self.check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
def generate_inputs(self):
x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
function = getattr(functions, self.function_name)
return function(x),
def forward_expected(self, inputs):
x, = inputs
function = getattr(numpy, self.function_name)
expected = function(x)
expected = utils.force_array(expected)
return expected,
testing.run_module(__name__, __file__)
| 2,992 | 26.712963 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_polygamma.py |
import unittest
import numpy
import chainer
import chainer.functions as F
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(), (3, 2)],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
@testing.with_requires('scipy')
class TestPolyGamma(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-4}
self.check_backward_options = {'eps': 1e-3, 'atol': 5e-2, 'rtol': 1e-3}
self.check_double_backward_options = {'eps': 1e-3, 'atol': 5e-2,
'rtol': 1e-3}
def generate_inputs(self):
n = numpy.random.randint(3, size=self.shape).astype(numpy.int32)
x = numpy.random.uniform(1., 10., self.shape).astype(self.dtype)
return n, x
def forward_expected(self, inputs):
n, x = inputs
        from scipy import special
        y_expect = special.polygamma(n, x)
return y_expect.astype(self.dtype),
def forward(self, inputs, device):
n, x = inputs
y = F.polygamma(n, x)
return y,
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.without_requires('scipy')
class TestPolyGammaExceptions(unittest.TestCase):
def setUp(self):
self.x = \
numpy.random.uniform(1., 10., self.shape).astype(self.dtype)
self.n = numpy.random.randint(3, size=self.shape).astype(numpy.int32)
self.func = F.polygamma
def check_forward(self, n_data, x_data):
x = chainer.Variable(x_data)
n = chainer.Variable(n_data)
with self.assertRaises(ImportError):
self.func(n, x)
def test_polygamma_forward_cpu(self):
self.check_forward(self.n, self.x)
testing.run_module(__name__, __file__)
| 2,324 | 27.012048 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_erfc.py |
import math
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _erfc_cpu(x, dtype):
return numpy.vectorize(math.erfc, otypes=[dtype])(x)
def _erfc_gpu(x, dtype):
return cuda.to_gpu(_erfc_cpu(cuda.to_cpu(x), dtype))
def _erfc_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _erfc_cpu(x, dtype)
else:
return _erfc_gpu(x, dtype)
@testing.unary_math_function_unittest(
F.erfc,
func_expected=_erfc_expected,
)
class TestErfc(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 662 | 17.416667 | 56 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/math_tests/test_inv.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def _inv(x):
if x.ndim == 2:
return numpy.linalg.inv(x)
return numpy.array([numpy.linalg.inv(ix) for ix in x])
def _make_eye(shape):
if len(shape) == 2:
n = shape[0]
return numpy.eye(n, dtype=numpy.float32)
m = shape[0]
n = shape[1]
return numpy.array([numpy.eye(n, dtype=numpy.float32)] * m)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(1, 1), (5, 5)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class InvFunctionTest(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_dtype = numpy.float32
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 5e-3, 'rtol': 5e-3})
else:
self.check_forward_dtype = self.dtype
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-4})
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({
'atol': 5e-4, 'rtol': 5e-4})
def generate_inputs(self):
x = (numpy.eye(self.shape[-1]) +
numpy.random.uniform(-0.01, 0.01, self.shape)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.inv(x),
def forward_expected(self, inputs):
x, = inputs
x1 = x.astype(self.check_forward_dtype, copy=False)
return _inv(x1).astype(self.dtype),
def test_identity(self, backend_config):
x, = self.generate_inputs()
x = chainer.Variable(backend_config.get_array(x))
y = functions.matmul(x, functions.inv(x))
testing.assert_allclose(
y.data, _make_eye(x.shape), **self.check_forward_options)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(5, 1, 1), (3, 5, 5)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class BatchInvFunctionTest(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_dtype = numpy.float32
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 2e-3, 'rtol': 2e-3})
self.check_double_backward_options.update({
'atol': 5e-3, 'rtol': 5e-3})
else:
self.check_forward_dtype = self.dtype
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-4})
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-0.01, 0.01, self.shape).astype(self.dtype)
x += numpy.eye(self.shape[-1])
return x,
def forward(self, inputs, device):
x, = inputs
return functions.batch_inv(x),
def forward_expected(self, inputs):
x, = inputs
x1 = x.astype(self.check_forward_dtype, copy=False)
return _inv(x1).astype(self.dtype),
def test_identity(self, backend_config):
x, = self.generate_inputs()
x = chainer.Variable(backend_config.get_array(x))
y = functions.matmul(x, functions.batch_inv(x))
testing.assert_allclose(
y.data, _make_eye(x.shape), **self.check_forward_options)
class InvFunctionRaiseTest(unittest.TestCase):
def test_invalid_ndim(self):
x = chainer.Variable(numpy.zeros((1, 2, 2), dtype=numpy.float32))
with self.assertRaises(type_check.InvalidType):
functions.inv(x)
def test_invalid_shape(self):
x = chainer.Variable(numpy.zeros((1, 2), dtype=numpy.float32))
with self.assertRaises(type_check.InvalidType):
functions.inv(x)
def test_singular_cpu(self):
x = chainer.Variable(numpy.zeros((2, 2), dtype=numpy.float32))
with self.assertRaises(ValueError):
functions.inv(x)
@attr.gpu
def test_singular_gpu(self):
x = chainer.Variable(
cuda.to_gpu(numpy.zeros((2, 2), dtype=numpy.float32)))
        # Should raise an exception only in debug mode.
with chainer.using_config('debug', False):
functions.inv(x)
with chainer.using_config('debug', True):
with self.assertRaises(ValueError):
functions.inv(x)
class BatchInvFunctionRaiseTest(unittest.TestCase):
def test_invalid_ndim(self):
x = chainer.Variable(numpy.zeros((2, 2), dtype=numpy.float32))
with self.assertRaises(type_check.InvalidType):
functions.batch_inv(x)
def test_invalid_shape(self):
x = chainer.Variable(numpy.zeros((1, 2, 1), dtype=numpy.float32))
with self.assertRaises(type_check.InvalidType):
functions.batch_inv(x)
def test_singular_cpu(self):
x = chainer.Variable(numpy.zeros((1, 2, 2), dtype=numpy.float32))
with self.assertRaises(ValueError):
functions.batch_inv(x)
@attr.gpu
def test_singular_gpu(self):
x = chainer.Variable(
cuda.to_gpu(numpy.zeros((1, 2, 2), dtype=numpy.float32)))
        # Should raise an exception only in debug mode.
with chainer.using_config('debug', False):
functions.batch_inv(x)
with chainer.using_config('debug', True):
with self.assertRaises(ValueError):
functions.batch_inv(x)
testing.run_module(__name__, __file__)
| 6,604
| 30.303318
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_basic_math.py
|
import operator
import sys
import unittest
import numpy
import pytest
import chainer
from chainer.backends import cuda
from chainer.functions.math import basic_math
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.utils import type_check
import chainerx
def arrays_to_chainerx(orig_xp, np_arrays):
assert all(isinstance(a, numpy.ndarray) for a in np_arrays)
if orig_xp is numpy:
orig_arrays = np_arrays
elif orig_xp is cuda.cupy:
orig_arrays = [cuda.to_gpu(a) for a in np_arrays]
return [chainer.backend.to_chx(a) for a in orig_arrays]
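# Descriptive note: ``arrays_to_chainerx`` first puts the NumPy inputs on the
# requested backend (NumPy as-is, CuPy via ``cuda.to_gpu``) and then wraps them
# with ``chainer.backend.to_chx`` so the same test data can drive the ChainerX
# forward tests.  Rough usage sketch:
#     x1_chx, x2_chx = arrays_to_chainerx(numpy, (x1, x2))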
@testing.parameterize(*testing.product({
'shape': [
# x1, x2, y
((3, 2), (3, 2), (3, 2)),
((), (), ()),
((3, 2), (3, 1), (3, 2)),
((2,), (3, 2), (3, 2)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestBinaryOp(unittest.TestCase):
def setUp(self):
self.x1 = numpy.random.uniform(.5, 1, self.shape[0]).astype(self.dtype)
self.x2 = numpy.random.uniform(.5, 1, self.shape[1]).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape[2]).astype(self.dtype)
self.ggx1 = numpy.random.uniform(-1, 1, self.shape[0]).astype(
self.dtype)
self.ggx2 = numpy.random.uniform(-1, 1, self.shape[1]).astype(
self.dtype)
def check_forward(self, op, x1_data, x2_data):
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
y = op(x1, x2)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 1e-4, 'rtol': 1e-3}
testing.assert_allclose(op(self.x1, self.x2), y.data, **options)
def forward_cpu(self, op):
self.check_forward(op, self.x1, self.x2)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
def test_floordiv_forward_cpu(self):
self.forward_cpu(lambda x, y: x // y)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__radd__(x))
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rsub__(x))
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rmul__(x))
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rtruediv__(x))
def test_rfloordiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rfloordiv__(x))
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rpow__(x))
def forward_gpu(self, op):
self.check_forward(op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))
@attr.gpu
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
def test_floordiv_forward_gpu(self):
self.forward_gpu(lambda x, y: x // y)
@attr.gpu
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__radd__(x))
@attr.gpu
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rsub__(x))
@attr.gpu
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rmul__(x))
@attr.gpu
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rtruediv__(x))
@attr.gpu
def test_rfloordiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rfloordiv__(x))
@attr.gpu
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rpow__(x))
@attr.gpu
def test_add_constant_allocation(self):
x = 0
y = chainer.Variable(cuda.cupy.ones((1,)))
z = y + x
self.assertEqual(1, z.data.get()[0])
def forward_chainerx(self, op, orig_xp):
xs_chx = arrays_to_chainerx(orig_xp, (self.x1, self.x2))
self.check_forward(op, *xs_chx)
@attr.chainerx
def test_add_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x + y, numpy)
@attr.chainerx
def test_sub_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x - y, numpy)
@attr.chainerx
def test_mul_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x * y, numpy)
@attr.chainerx
def test_div_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x / y, numpy)
@attr.chainerx
@attr.gpu
def test_add_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x + y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_sub_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x - y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_mul_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x * y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_div_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x / y, cuda.cupy)
# TODO(hvy): Implement floor.
@pytest.mark.skip
@attr.chainerx
def test_floordiv_forward_chainerx_cpu(self):
pass
@attr.chainerx
def test_pow_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x.__pow__(y), numpy)
@attr.chainerx
def test_radd_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y.__radd__(x), numpy)
@attr.chainerx
def test_rsub_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y.__rsub__(x), numpy)
@attr.chainerx
def test_rmul_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y.__rmul__(x), numpy)
@attr.chainerx
def test_rdiv_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y.__rtruediv__(x), numpy)
# TODO(hvy): Implement floor.
@pytest.mark.skip
@attr.chainerx
def test_rfloordiv_forward_chainerx_cpu(self):
pass
@attr.chainerx
def test_rpow_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y.__rpow__(x), numpy)
# TODO(hvy): Implement floor.
@pytest.mark.skip
@attr.chainerx
@attr.gpu
def test_floordiv_forward_chainerx_gpu(self):
pass
@attr.chainerx
@attr.gpu
def test_pow_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x.__pow__(y), cuda.cupy)
@attr.chainerx
@attr.gpu
def test_radd_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y.__radd__(x), cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rsub_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y.__rsub__(x), cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rmul_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y.__rmul__(x), cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rdiv_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y.__rtruediv__(x), cuda.cupy)
# TODO(hvy): Implement floor.
@pytest.mark.skip
@attr.chainerx
@attr.gpu
def test_rfloordiv_forward_chainerx_gpu(self):
pass
@attr.chainerx
@attr.gpu
def test_rpow_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y.__rpow__(x), cuda.cupy)
def check_backward(self, op, x1_data, x2_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(op, (x1_data, x2_data), y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, op):
self.check_backward(op, self.x1, self.x2, self.gy)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
def backward_gpu(self, op):
self.check_backward(
op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy))
@attr.gpu
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
def backward_chainerx(self, op):
self.check_backward(
op, chainerx.array(self.x1), chainerx.array(self.x2),
chainerx.array(self.gy))
@attr.chainerx
def test_add_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x + y)
@attr.chainerx
def test_sub_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x - y)
@attr.chainerx
def test_mul_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x * y)
@attr.chainerx
def test_div_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x / y)
@attr.chainerx
def test_pow_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x ** y)
def check_double_backward(
self, op, x1_data, x2_data, y_grad, ggx1_data, ggx2_data, **args):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
options.update(args)
gradient_check.check_double_backward(
op, (x1_data, x2_data), y_grad, (ggx1_data, ggx2_data),
dtype=numpy.float64, **options)
def double_backward_cpu(self, op, **options):
self.check_double_backward(
op, self.x1, self.x2, self.gy, self.ggx1, self.ggx2,
**options)
def test_div_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x / y, atol=5e-2, rtol=5e-2)
def test_pow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x ** y)
def test_rpow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y.__rpow__(x))
def double_backward_gpu(self, op, **options):
self.check_double_backward(
op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx1), cuda.to_gpu(self.ggx2), **options)
@attr.gpu
def test_div_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x / y, atol=5e-2, rtol=5e-2)
@attr.gpu
def test_pow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y.__rpow__(x))
def double_backward_chainerx(self, op, **options):
self.check_double_backward(
op, chainerx.array(self.x1), chainerx.array(self.x2),
chainerx.array(self.gy),
chainerx.array(self.ggx1), chainerx.array(self.ggx2), **options)
@attr.chainerx
def test_div_double_backward_chainerx(self):
self.double_backward_chainerx(lambda x, y: x / y, atol=5e-2, rtol=5e-2)
@attr.chainerx
def test_pow_double_backward_chainerx(self):
self.double_backward_chainerx(lambda x, y: x ** y)
@attr.chainerx
def test_rpow_double_backward_chainerx(self):
self.double_backward_chainerx(lambda x, y: y.__rpow__(x))
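# Illustrative note for the variadic-add tests below: ``chainer.functions.add``
# accepts more than two inputs and broadcasts them, so
#     y = chainer.functions.add(x1, x2, x3)
# matches ``x1 + x2 + x3`` on broadcast-compatible shapes.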
@testing.parameterize(*testing.product({
'in_shapes': [
((3, 2),) * 3,
((),) * 3,
((1, 3), (), (2, 1, 2, 1)),
((), (2, 1, 2), (3, 1)),
((3, 1), (1, 1), (2,)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
}))
class TestMultipleAdd(unittest.TestCase):
def setUp(self):
x1_shape, x2_shape, x3_shape = self.in_shapes
self.x1 = numpy.random.uniform(.5, 1, x1_shape).astype(self.dtype)
self.x2 = numpy.random.uniform(.5, 1, x2_shape).astype(self.dtype)
self.x3 = numpy.random.uniform(.5, 1, x3_shape).astype(self.dtype)
y_shape = numpy.broadcast(self.x1, self.x2, self.x3).shape
self.gy = numpy.random.uniform(-1, 1, y_shape).astype(self.dtype)
self.ggx1 = numpy.random.uniform(-1, 1, x1_shape).astype(self.dtype)
self.ggx2 = numpy.random.uniform(-1, 1, x2_shape).astype(self.dtype)
self.ggx3 = numpy.random.uniform(-1, 1, x3_shape).astype(self.dtype)
def check_forward(self, func, x1_data, x2_data, x3_data, backend_config):
# convert to cupy.ndarray for GPU tests
if backend_config.use_cuda:
x1_data, x2_data, x3_data = cuda.to_gpu(
(x1_data, x2_data, x3_data))
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
x3 = chainer.Variable(x3_data)
with backend_config:
y = func(x1, x2, x3)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 1e-4, 'rtol': 1e-3}
testing.assert_allclose(
(self.x1 + self.x2 + self.x3), y.data, **options)
def forward_cpu(self, func, backend_config):
self.check_forward(func, self.x1, self.x2, self.x3, backend_config)
def test_forward(self, backend_config):
func = chainer.functions.add
self.forward_cpu(func, backend_config)
def check_backward(self, func, x1_data, x2_data, x3_data, y_grad,
backend_config):
# convert to cupy.ndarray for GPU tests
if backend_config.use_cuda:
x1_data, x2_data, x3_data, y_grad = cuda.to_gpu(
(x1_data, x2_data, x3_data, y_grad))
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
with backend_config:
gradient_check.check_backward(func, (x1_data, x2_data, x3_data),
y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, func, backend_config):
self.check_backward(
func, self.x1, self.x2, self.x3, self.gy, backend_config)
def test_backward(self, backend_config):
func = chainer.functions.add
self.backward_cpu(func, backend_config)
def check_double_backward(
self, func, backend_config, x1_data, x2_data, x3_data, y_grad,
ggx1_data, ggx2_data, ggx3_data, **args):
# convert to cupy.ndarray for GPU tests
if backend_config.use_cuda:
(x1_data, x2_data, x3_data, y_grad,
ggx1_data, ggx2_data, ggx3_data) = cuda.to_gpu(
(x1_data, x2_data, x3_data, y_grad,
ggx1_data, ggx2_data, ggx3_data))
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
options.update(args)
with backend_config:
gradient_check.check_double_backward(
func, (x1_data, x2_data, x3_data), y_grad,
(ggx1_data,
ggx2_data, ggx3_data),
dtype=numpy.float64, **options)
def double_backward_cpu(self, func, backend_config, **options):
self.check_double_backward(
func, backend_config, self.x1, self.x2, self.x3, self.gy,
self.ggx1, self.ggx2, self.ggx3,
**options)
def test_double_backward(self, backend_config):
func = chainer.functions.add
self.double_backward_cpu(func, backend_config, atol=5e-2, rtol=5e-2)
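# Note on the constant tests below: a Variable combines directly with Python
# and NumPy scalars (``x + 1``, ``x * numpy.float64(2.0)``) and with ndarrays of
# a compatible shape, while plain lists/tuples raise TypeError and
# shape-mismatched arrays fail the type check.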
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestBinaryOpConstant(unittest.TestCase):
def _test_constant_one(self, func, lhs, rhs, gpu=False):
if gpu:
lhs = cuda.to_gpu(lhs)
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, self.dtype)
y.backward()
self.assertEqual(x.grad.dtype, self.dtype)
def _test_constant(self, func):
x_data = numpy.array(1, self.dtype)
self._test_constant_one(func, x_data, 1)
self._test_constant_one(func, x_data, 1.0)
self._test_constant_one(func, x_data, numpy.int64(1))
self._test_constant_one(func, x_data, numpy.float64(1.0))
def _test_constant_gpu(self, func):
x_data = numpy.array(1, self.dtype)
self._test_constant_one(func, x_data, 1, True)
self._test_constant_one(func, x_data, 1.0, True)
self._test_constant_one(func, x_data, numpy.int64(1), True)
self._test_constant_one(func, x_data, numpy.float64(1), True)
def _test_constant_array_one(self, func, lhs, rhs):
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, self.dtype)
y.grad = numpy.ones_like(y.data, self.dtype)
y.backward()
self.assertEqual(x.grad.dtype, self.dtype)
def _test_constant_array(self, func):
x_data = numpy.array([1.0, 2.0], self.dtype)
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], self.dtype))
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, [3.0, 4.0])
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, (3.0, 4.0))
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, [3.0, 4.0, 5.0])
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, (3.0, 4.0, 5.0))
with pytest.raises(type_check.InvalidType):
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0, 5.0], self.dtype))
def _test_constant_array_gpu_one(self, func, lhs, rhs):
x = chainer.Variable(cuda.to_gpu(lhs))
y = func(x, rhs)
self.assertEqual(y.data.dtype, self.dtype)
y.grad = cuda.cupy.ones_like(y.data).astype(self.dtype)
y.backward()
self.assertEqual(x.grad.dtype, self.dtype)
def _test_constant_array_gpu(self, func, exception=TypeError):
x_data = numpy.array([1.0, 2.0], self.dtype)
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], self.dtype)))
with pytest.raises(exception):
self._test_constant_array_one(
func, x_data, cuda.to_gpu(
numpy.array([3.0, 4.0, 5.0], self.dtype)))
def test_add_constant(self):
self._test_constant(lambda x, y: x + y)
@attr.gpu
def test_add_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x + y)
def test_add_constant_array(self):
self._test_constant_array(lambda x, y: x + y)
@attr.gpu
def test_add_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x + y)
def test_radd_constant(self):
self._test_constant(lambda x, y: y + x)
@attr.gpu
def test_radd_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y + x)
def test_radd_constant_array(self):
self._test_constant_array(lambda x, y: y + x)
@attr.gpu
def test_radd_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y + x)
def test_sub_constant(self):
self._test_constant(lambda x, y: x - y)
@attr.gpu
def test_sub_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x - y)
def test_sub_constant_array(self):
self._test_constant_array(lambda x, y: x - y)
@attr.gpu
def test_sub_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x - y)
def test_rsub_constant(self):
self._test_constant(lambda x, y: y - x)
@attr.gpu
def test_rsub_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y - x)
def test_rsub_constant_array(self):
self._test_constant_array(lambda x, y: y - x)
@attr.gpu
def test_rsub_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y - x)
def test_mul_constant(self):
self._test_constant(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x * y)
def test_mul_constant_array(self):
self._test_constant_array(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: x * y)
def test_rmul_constant(self):
self._test_constant(lambda x, y: y * x)
@attr.gpu
def test_rmul_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y * x)
def test_rmul_constant_array(self):
self._test_constant_array(lambda x, y: y * x)
@attr.gpu
def test_rmul_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: y * x, exception=Exception)
def test_div_constant(self):
self._test_constant(lambda x, y: x / y)
@attr.gpu
def test_div_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x / y)
def test_div_constant_array(self):
self._test_constant_array(lambda x, y: x / y)
@attr.gpu
def test_div_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: x / y, exception=Exception)
def test_rdiv_constant(self):
self._test_constant(lambda x, y: y / x)
@attr.gpu
def test_rdiv_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y / x)
def test_rdiv_constant_array(self):
self._test_constant_array(lambda x, y: y / x)
@attr.gpu
def test_rdiv_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y / x)
def test_pow_constant(self):
self._test_constant(lambda x, y: x ** y)
@attr.gpu
def test_pow_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x ** y)
def test_pow_constant_array(self):
self._test_constant_array(lambda x, y: x ** y)
@attr.gpu
def test_pow_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x ** y, exception=TypeError)
def test_rpow_constant(self):
self._test_constant(lambda x, y: y ** x)
@attr.gpu
def test_rpow_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y ** x)
def test_rpow_constant_array(self):
self._test_constant_array(lambda x, y: y ** x)
@attr.gpu
def test_rpow_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: y ** x, exception=Exception)
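# Pattern used by the scalar-constant gradient tests below: the constant
# operand is fixed (``value = 0.5``) and only the Variable input is
# differentiated, e.g. (illustrative)
#     gradient_check.check_backward(lambda x: x * 0.5, x_data, y_grad)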
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestVariableConstantOp(unittest.TestCase):
def make_date(self):
raise NotImplementedError()
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.value = 0.5
def check_forward(self, op, x_data):
x = chainer.Variable(x_data)
y = op(x, self.value)
if self.dtype == numpy.float16:
atol = 5e-4
rtol = 5e-4
else:
atol = 1e-7
rtol = 1e-7
testing.assert_allclose(
op(self.x, self.value), y.data, atol=atol, rtol=rtol)
def forward_cpu(self, op):
self.check_forward(op, self.x)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y + x)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y - x)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y * x)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y / x)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y ** x)
def forward_gpu(self, op):
self.check_forward(op, cuda.to_gpu(self.x))
@attr.gpu
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y ** x)
def forward_chainerx(self, op, orig_xp):
xs_chx = arrays_to_chainerx(orig_xp, (self.x,))
self.check_forward(op, *xs_chx)
@attr.chainerx
def test_add_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x + y, numpy)
@attr.chainerx
def test_radd_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y + x, numpy)
@attr.chainerx
def test_sub_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x - y, numpy)
@attr.chainerx
def test_rsub_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y - x, numpy)
@attr.chainerx
def test_mul_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x * y, numpy)
@attr.chainerx
def test_rmul_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y * x, numpy)
@attr.chainerx
def test_div_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x / y, numpy)
@attr.chainerx
def test_rdiv_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y / x, numpy)
@attr.chainerx
def test_pow_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x ** y, numpy)
@attr.chainerx
def test_rpow_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y ** x, numpy)
@attr.chainerx
@attr.gpu
def test_add_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x + y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_radd_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y + x, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_sub_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x - y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rsub_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y - x, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_mul_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x * y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rmul_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y * x, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_div_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x / y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rdiv_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y / x, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_pow_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x ** y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rpow_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y ** x, cuda.cupy)
def check_backward(self, op, x_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(lambda x: op(x, self.value),
x_data, y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
def test_radd_backward_cpu(self):
self.backward_cpu(lambda x, y: y + x)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
def test_rsub_backward_cpu(self):
self.backward_cpu(lambda x, y: y - x)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
def test_rmul_backward_cpu(self):
self.backward_cpu(lambda x, y: y * x)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
def test_rdiv_backward_cpu(self):
self.backward_cpu(lambda x, y: y / x)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
def test_rpow_backward_cpu(self):
self.backward_cpu(lambda x, y: y ** x)
def backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
def test_rsub_backward_gpu(self):
self.backward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x)
def backward_chainerx(self, op):
self.check_backward(
op, chainerx.array(self.x), chainerx.array(self.gy))
@attr.chainerx
def test_add_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x + y)
@attr.chainerx
def test_radd_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y + x)
@attr.chainerx
def test_sub_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x - y)
@attr.chainerx
def test_rsub_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y - x)
@attr.chainerx
def test_mul_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x * y)
@attr.chainerx
def test_rmul_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y * x)
@attr.chainerx
def test_div_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x / y)
@attr.chainerx
def test_rdiv_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y / x)
@attr.chainerx
def test_pow_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x ** y)
@attr.chainerx
def test_rpow_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y ** x)
def check_double_backward(self, op, x_data, y_grad, x_grad_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
def _op(x):
return op(x, self.value)
gradient_check.check_double_backward(
_op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)
def double_backward_cpu(self, op):
self.check_double_backward(op, self.x, self.gy, self.ggx)
def test_pow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x ** y)
def test_rpow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y ** x)
def test_rdiv_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y / x)
def double_backward_gpu(self, op):
self.check_double_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
@attr.gpu
def test_pow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y ** x)
@attr.gpu
def test_rdiv_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y / x)
def double_backward_chainerx(self, op):
self.check_double_backward(
op, chainerx.array(self.x), chainerx.array(self.gy),
chainerx.array(self.ggx))
@attr.chainerx
def test_pow_double_backward_chainerx(self):
# TODO(niboshi): Support it
raise unittest.SkipTest('chainerx.broadcast is required')
self.double_backward_chainerx(lambda x, y: x ** y)
@attr.chainerx
def test_rpow_double_backward_chainerx(self):
# TODO(niboshi): Support it
raise unittest.SkipTest(
'chainerx.log with scalar argument is required')
self.double_backward_chainerx(lambda x, y: y ** x)
@attr.chainerx
def test_rdiv_double_backward_chainerx(self):
# TODO(niboshi): Support it
raise unittest.SkipTest('chainerx.broadcast is required')
self.double_backward_chainerx(lambda x, y: y / x)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestVariableConstantArrayOp(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, (3, 2)).astype(self.dtype)
self.value = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
# Avoid overflow in div test (especially backward)
self.value[abs(self.value) < 1e-2] = 1.
def check_forward(self, op, array_conv, positive):
value = self.value
if positive:
value = numpy.abs(value)
v = array_conv(value)
x = chainer.Variable(array_conv(self.x))
y = op(x, v)
if self.dtype == numpy.float16:
tol = 1e-3
else:
tol = 1e-6
testing.assert_allclose(
op(self.x, value), y.data, atol=tol, rtol=tol)
def forward_cpu(self, op, positive=False):
self.check_forward(op, lambda x: x, positive)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y + x)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y - x)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y * x)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y / x)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y ** x, positive=True)
def forward_gpu(self, op, positive=False):
self.check_forward(op, cuda.to_gpu, positive)
@attr.gpu
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y ** x, positive=True)
def forward_chainerx(self, op, orig_xp, positive=False):
if orig_xp is numpy:
array_conv = chainer.backend.to_chx
else:
assert orig_xp is cuda.cupy
def array_conv(x):
return chainer.backend.to_chx(cuda.to_gpu(x))
self.check_forward(op, array_conv, positive)
@attr.chainerx
def test_pow_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: x ** y, numpy)
@attr.chainerx
def test_rpow_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x, y: y ** x, numpy, positive=True)
@attr.chainerx
@attr.gpu
def test_pow_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: x ** y, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_rpow_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x, y: y ** x, cuda.cupy, positive=True)
def check_backward(self, op, x_data, y_grad, array_conv, positive):
value = self.value
if positive:
value = numpy.abs(value)
value = array_conv(value)
x_data = array_conv(x_data)
y_grad = array_conv(y_grad)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
# numeric_gradient will cast `x` to float64, but not `value`.
        # It is cast here.
def op_(x):
return op(
x,
value if x.dtype == value.dtype else value.astype(x.dtype))
gradient_check.check_backward(op_, x_data, y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, op, positive=False):
self.check_backward(op, self.x, self.gy, lambda x: x, positive)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
def test_radd_backward_cpu(self):
self.backward_cpu(lambda x, y: y + x)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
def test_rsub_backward_cpu(self):
self.backward_cpu(lambda x, y: y - x)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
def test_rmul_backward_cpu(self):
self.backward_cpu(lambda x, y: y * x)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
def test_rdiv_backward_cpu(self):
self.backward_cpu(lambda x, y: y / x)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
def test_rpow_backward_cpu(self):
self.backward_cpu(lambda x, y: y ** x, positive=True)
def backward_gpu(self, op, positive=False):
self.check_backward(op, self.x, self.gy, cuda.to_gpu, positive)
@attr.gpu
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x, positive=True)
def backward_chainerx(self, op, positive=False):
self.check_backward(op, self.x, self.gy, chainerx.array, positive)
@attr.chainerx
def test_add_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x + y)
@attr.chainerx
def test_radd_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y + x)
@attr.chainerx
def test_sub_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x - y)
@attr.chainerx
def test_mul_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x * y)
@attr.chainerx
def test_rmul_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y * x)
@attr.chainerx
def test_div_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x / y)
@attr.chainerx
def test_rdiv_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y / x)
@attr.chainerx
def test_pow_backward_chainerx(self):
self.backward_chainerx(lambda x, y: x ** y)
@attr.chainerx
def test_rpow_backward_chainerx(self):
self.backward_chainerx(lambda x, y: y ** x, positive=True)
def check_double_backward(
self, op, x_data, y_grad, x_grad_grad, array_conv, positive):
value = self.value
if positive:
value = numpy.abs(value)
value = array_conv(value)
x_data = array_conv(x_data)
y_grad = array_conv(y_grad)
x_grad_grad = array_conv(x_grad_grad)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
def _op(x):
return op(x, value)
gradient_check.check_double_backward(
_op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)
def double_backward_cpu(self, op, positive=False):
self.check_double_backward(
op, self.x, self.gy, self.ggx, lambda x: x, positive)
def test_pow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x ** y)
def test_rpow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y ** x, positive=True)
def double_backward_gpu(self, op, positive=False):
self.check_double_backward(
op, self.x, self.gy, self.ggx, cuda.to_gpu, positive)
@attr.gpu
def test_pow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y ** x, positive=True)
def double_backward_chainerx(self, op, positive=False):
self.check_double_backward(
op, self.x, self.gy, self.ggx, chainerx.array, positive)
@attr.chainerx
def test_pow_double_backward_chainerx(self):
self.double_backward_chainerx(lambda x, y: x ** y)
@attr.chainerx
def test_rpow_double_backward_chainerx(self):
self.double_backward_chainerx(lambda x, y: y ** x, positive=True)
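# The unary tests below cover ``-x`` (basic_math.Neg) and ``abs(x)``
# (basic_math.Absolute).  ``setUp`` nudges inputs away from zero because the
# gradient of ``abs`` is not defined at 0, which would make the numerical
# gradient check unstable.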
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUnaryFunctions(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
for i in numpy.ndindex(self.shape):
if -0.1 < self.x[i] < 0.1:
self.x[i] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
def check_forward(self, op, op_np, x_data):
x = chainer.Variable(x_data)
y = op(x)
testing.assert_allclose(
op_np(self.x), y.data, atol=1e-7, rtol=1e-7)
def forward_cpu(self, op, op_np):
self.check_forward(op, op_np, self.x)
def test_neg_forward_cpu(self):
self.forward_cpu(lambda x: -x, lambda x: -x)
def test_abs_forward_cpu(self):
self.forward_cpu(lambda x: abs(x), lambda x: abs(x))
def forward_gpu(self, op, op_np):
self.check_forward(op, op_np, cuda.to_gpu(self.x))
@attr.gpu
def test_neg_forward_gpu(self):
self.forward_gpu(lambda x: -x, lambda x: -x)
@attr.gpu
def test_abs_forward_gpu(self):
self.forward_gpu(lambda x: abs(x), lambda x: abs(x))
def forward_chainerx(self, op, op_np, orig_xp):
xs_chx = arrays_to_chainerx(orig_xp, (self.x,))
self.check_forward(op, op_np, *xs_chx)
@attr.chainerx
def test_neg_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x: -x, lambda x: -x, numpy)
@attr.chainerx
def test_abs_forward_chainerx_cpu(self):
self.forward_chainerx(lambda x: abs(x), lambda x: abs(x), numpy)
@attr.chainerx
@attr.gpu
def test_neg_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x: -x, lambda x: -x, cuda.cupy)
@attr.chainerx
@attr.gpu
def test_abs_forward_chainerx_gpu(self):
self.forward_chainerx(lambda x: abs(x), lambda x: abs(x), cuda.cupy)
def check_backward(self, op, x_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(
op, x_data, y_grad, dtype=numpy.float64, **options)
def backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
def test_neg_backward_cpu(self):
self.backward_cpu(lambda x: -x)
def test_abs_backward_cpu(self):
self.backward_cpu(lambda x: abs(x))
def backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_neg_backward_gpu(self):
self.backward_gpu(lambda x: -x)
@attr.gpu
def test_abs_backward_gpu(self):
self.backward_gpu(lambda x: abs(x))
def backward_chainerx(self, op):
self.check_backward(
op, chainerx.array(self.x), chainerx.array(self.gy))
@attr.chainerx
def test_neg_backward_chainerx(self):
self.backward_chainerx(lambda x: -x)
def check_double_backward(self, op, x_data, y_grad, x_grad_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_double_backward(
op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)
def double_backward_cpu(self, op):
self.check_double_backward(op, self.x, self.gy, self.ggx)
def test_neg_double_backward_cpu(self):
self.double_backward_cpu(lambda x: -x)
def test_abs_double_backward_cpu(self):
self.double_backward_cpu(lambda x: abs(x))
def double_backward_gpu(self, op):
self.check_double_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
@attr.gpu
def test_neg_double_backward_gpu(self):
self.double_backward_gpu(lambda x: -x)
@attr.gpu
def test_abs_double_backward_gpu(self):
self.double_backward_gpu(lambda x: abs(x))
def double_backward_chainerx(self, op):
self.check_double_backward(
op, chainerx.array(self.x), chainerx.array(self.gy),
chainerx.array(self.ggx))
@attr.chainerx
def test_neg_double_backward_chainerx(self):
self.double_backward_chainerx(lambda x: -x)
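# TestNegativePow below checks that ``x ** 2`` differentiates correctly for
# negative bases as well; the analytic gradient is ``2 * x * gy``, which the
# numerical checks verify, e.g. (illustrative)
#     gradient_check.check_backward(lambda x: x ** 2, x_data, y_grad)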
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestNegativePow(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 0, (3, 2)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
def check_backward(self, x_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(
lambda x: x ** 2, x_data, y_grad, dtype=numpy.float64, **options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_double_backward(
lambda x: x ** 2, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
**options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
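# The matmul tests below go through ``operator.matmul`` so they also exercise
# the ``@`` operator available since Python 3.5; for Variables ``x @ y``
# dispatches to Chainer's matmul and follows NumPy-style batched semantics
# (leading dimensions broadcast, the last two are contracted).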
@testing.parameterize(*testing.product_dict(
[
{'left_const': False, 'right_const': False},
{'left_const': True, 'right_const': False},
{'left_const': False, 'right_const': True},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
], [
{'x_shape': (3, 2), 'y_shape': (2, 4), 'z_shape': (3, 4)},
{'x_shape': (2, 3, 2), 'y_shape': (2, 2, 4), 'z_shape': (2, 3, 4)},
{'x_shape': (2, 1, 3, 4),
'y_shape': (2, 4, 2),
'z_shape': (2, 2, 3, 2)},
{'x_shape': (5, 3, 2), 'y_shape': (2,), 'z_shape': (5, 3)},
{'x_shape': (2,), 'y_shape': (5, 2, 4), 'z_shape': (5, 4)},
{'x_shape': (2, 3, 2), 'y_shape': (2, 4), 'z_shape': (2, 3, 4)},
{'x_shape': (3,), 'y_shape': (3,), 'z_shape': ()},
]
))
@unittest.skipUnless(sys.version_info >= (3, 5),
'Only for Python3.5 or higher')
class TestMatMul(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.gz = numpy.random.uniform(-1, 1, self.z_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.x_shape).astype(self.dtype)
self.ggy = numpy.random.uniform(
-1, 1, self.y_shape).astype(self.dtype)
def _get_forward_answer(self, x, y):
if x.ndim <= 2 or y.ndim == 1:
return numpy.dot(x, y)
elif hasattr(numpy, 'matmul'):
# Note: NumPy 1.14.0 has a bug in einsum (numpy/numpy#10343),
# so we use matmul if available to avoid it
return numpy.matmul(x, y)
else:
return numpy.einsum('...ij,...jk->...ik', x, y)
def check_forward(self, x_data, y_data):
if self.left_const:
x = x_data
else:
x = chainer.Variable(x_data)
if self.right_const:
y = y_data
else:
y = chainer.Variable(y_data)
z = operator.matmul(x, y)
if self.dtype == numpy.float16:
options = {'atol': 2e-3, 'rtol': 2e-3}
else:
options = {'atol': 2e-7, 'rtol': 2e-7}
testing.assert_allclose(
self._get_forward_answer(self.x, self.y), z.data, **options)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
def check_backward(self, x_data, y_data, z_grad):
if self.right_const:
def op(x):
return operator.matmul(x, y_data)
data = x_data,
elif self.left_const:
def op(y):
return operator.matmul(x_data, y)
data = y_data,
else:
op = operator.matmul
data = x_data, y_data
if self.dtype == numpy.float16:
options = {'atol': 1e-3, 'rtol': 1e-2}
else:
options = {'atol': 1e-4, 'rtol': 1e-4}
gradient_check.check_backward(
op, data, z_grad, dtype=numpy.float64, **options)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.y), cuda.to_gpu(self.gz))
def check_double_backward(
self, x_data, y_data, z_grad, x_grad_grad, y_grad_grad):
if self.right_const:
def op(x):
return operator.matmul(x, y_data.astype(x.dtype))
data = x_data,
grad_grad = x_grad_grad,
elif self.left_const:
def op(y):
return operator.matmul(x_data.astype(y.dtype), y)
data = y_data,
grad_grad = y_grad_grad,
else:
op = operator.matmul
data = x_data, y_data
grad_grad = x_grad_grad, y_grad_grad
if self.dtype == numpy.float16:
options = {'atol': 1e-3, 'rtol': 1e-2}
else:
options = {'atol': 1e-4, 'rtol': 1e-4}
gradient_check.check_double_backward(
op, data, z_grad, grad_grad, dtype=numpy.float64, **options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.y, self.gz, self.ggx, self.ggy)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.y), cuda.to_gpu(self.gz),
cuda.to_gpu(self.ggx), cuda.to_gpu(self.ggy))
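# Shape sketch for the invalid cases below: matmul needs non-scalar operands
# whose inner dimensions match, so scalars (shape ``()``) and mismatches such
# as (2, 3) @ (2, 3) or (2,) @ (1,) are rejected with a type-check error.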
@testing.parameterize(
{'x_shape': (), 'y_shape': ()},
{'x_shape': (3, 2), 'y_shape': ()},
{'x_shape': (), 'y_shape': (2, 4)},
{'x_shape': (2, 3), 'y_shape': (2, 3)},
{'x_shape': (2,), 'y_shape': (1,)},
)
@unittest.skipUnless(sys.version_info >= (3, 5),
'Only for Python3.5 or higher')
class TestMatMulInvalidShape(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
def test_invalid_type(self):
x = chainer.Variable(self.x)
y = chainer.Variable(self.y)
with pytest.raises(type_check.InvalidType):
operator.matmul(x, y)
class ConvertValueToStringTest(unittest.TestCase):
def _check_scalar(self, value, string):
self.assertEqual(basic_math._convert_value_to_string(value), string)
def test_integer_positive(self):
self._check_scalar(2, '2')
def test_integer_zero(self):
self._check_scalar(0, '0')
def test_integer_negative(self):
self._check_scalar(-2, '(-2)')
def test_float_positive(self):
self._check_scalar(2.0, '2.0')
def test_float_zero(self):
self._check_scalar(0.0, '0.0')
def test_float_negative(self):
self._check_scalar(-2.0, '(-2.0)')
def test_numpy_scalar(self):
self._check_scalar(numpy.float32(2), '2.0')
def _check_array(self, value, string):
self.assertEqual(basic_math._convert_value_to_string(value), string)
value = chainer.Variable(value)
self.assertEqual(basic_math._convert_value_to_string(value), string)
def test_array_cpu(self):
self._check_array(numpy.array([1, 2]), 'constant array')
@attr.gpu
def test_array_gpu(self):
self._check_array(cuda.ndarray([1, 2]), 'constant array')
class TestLabel(unittest.TestCase):
def test_neg(self):
self.assertEqual(basic_math.Neg().label, '__neg__')
def test_absolute(self):
self.assertEqual(basic_math.Absolute().label, '|_|')
def test_add(self):
self.assertEqual(basic_math.Add().label, '_ + _')
def test_add_constant(self):
self.assertEqual(basic_math.AddConstant(2.0).label, '_ + 2.0')
def test_sub(self):
self.assertEqual(basic_math.Sub().label, '_ - _')
def test_sub_from_constant(self):
self.assertEqual(basic_math.SubFromConstant(2.0).label, '2.0 - _')
def test_mul(self):
self.assertEqual(basic_math.Mul().label, '_ * _')
def test_mul_constant(self):
self.assertEqual(basic_math.MulConstant(2.0).label, '_ * 2.0')
def test_div(self):
self.assertEqual(basic_math.Div().label, '_ / _')
def test_div_from_constant(self):
self.assertEqual(basic_math.DivFromConstant(2.0).label, '2.0 / _')
def test_pow_var_var(self):
self.assertEqual(basic_math.PowVarVar().label, '_ ** _')
def test_pow_var_const(self):
self.assertEqual(basic_math.PowVarConst(2.0).label, '_ ** 2.0')
def test_pow_const_var(self):
self.assertEqual(basic_math.PowConstVar(2.0).label, '2.0 ** _')
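# The ``label`` strings asserted above are what each node shows when a
# computational graph is rendered (e.g. with chainer.computational_graph), so
# they stay short and operator-like.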
testing.run_module(__name__, __file__)
| 58,895
| 30.512039
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_tensordot.py
|
import unittest
import numpy
import chainer
import chainer.functions as F
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'a_shape': (4, 3, 2), 'b_shape': (3, 2, 5), 'axes': 2, 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (4, 3, 2), 'b_shape': (3, 2, 5), 'axes': ([1, 2], [0, 1]), 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (4, 2, 3), 'b_shape': (3, 5, 2), 'axes': ([2, 1], [0, 2]), 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (2, 4, 3), 'b_shape': (5, 3, 2), 'axes': ([2, 0], [1, 2]), 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (2, 3, 4), 'b_shape': (5, 2, 3), 'axes': ([1, 0], [2, 1]), 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (3, 2, 4), 'b_shape': (2, 5, 3), 'axes': ([0, 1], [2, 0]), 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (3, 4, 2), 'b_shape': (2, 3, 5), 'axes': ([0, 2], [1, 0]), 'gc_shape': (4, 5)}, # NOQA
{'a_shape': (3, 4, 2), 'b_shape': (2, 5, 6), 'axes': 1, 'gc_shape': (3, 4, 5, 6)}, # NOQA
{'a_shape': (3, 4, 2), 'b_shape': (2, 5, 6), 'axes': ([2, 0]), 'gc_shape': (3, 4, 5, 6)}, # NOQA
{'a_shape': (3, 2, 4), 'b_shape': (5, 2, 6), 'axes': ([1, 1]), 'gc_shape': (3, 4, 5, 6)}, # NOQA
{'a_shape': (2, 3, 4), 'b_shape': (5, 6, 2), 'axes': ([0, 2]), 'gc_shape': (3, 4, 5, 6)}, # NOQA
{'a_shape': (4, 5, 3, 2), 'b_shape': (3, 2, 6), 'axes': 2, 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (4, 5, 3, 2), 'b_shape': (3, 2, 6), 'axes': ([2, 3], [0, 1]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (4, 5, 2, 3), 'b_shape': (3, 6, 2), 'axes': ([3, 2], [0, 2]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (4, 2, 5, 3), 'b_shape': (6, 3, 2), 'axes': ([3, 1], [1, 2]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (2, 4, 5, 3), 'b_shape': (6, 2, 3), 'axes': ([3, 0], [2, 1]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (2, 4, 3, 5), 'b_shape': (2, 6, 3), 'axes': ([2, 0], [2, 0]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (2, 3, 4, 5), 'b_shape': (2, 3, 6), 'axes': ([1, 0], [1, 0]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (3, 2, 4, 5), 'b_shape': (3, 2, 6), 'axes': ([0, 1], [0, 1]), 'gc_shape': (4, 5, 6)}, # NOQA
{'a_shape': (3, 2, 5, 4), 'b_shape': (3, 6, 2), 'axes': ([0, 1], [0, 2]), 'gc_shape': (5, 4, 6)}, # NOQA
{'a_shape': (3, 5, 2, 4), 'b_shape': (6, 3, 2), 'axes': ([0, 2], [1, 2]), 'gc_shape': (5, 4, 6)}, # NOQA
{'a_shape': (5, 3, 2, 4), 'b_shape': (6, 2, 3), 'axes': ([1, 2], [2, 1]), 'gc_shape': (5, 4, 6)}, # NOQA
{'a_shape': (5, 4, 3, 2), 'b_shape': (4, 3, 2, 6), 'axes': 3, 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (5, 4, 3, 2), 'b_shape': (4, 3, 2, 6), 'axes': ([1, 2, 3], [0, 1, 2]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (5, 4, 2, 3), 'b_shape': (4, 3, 6, 2), 'axes': ([1, 3, 2], [0, 1, 3]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (5, 2, 4, 3), 'b_shape': (4, 6, 3, 2), 'axes': ([2, 3, 1], [0, 2, 3]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (2, 5, 4, 3), 'b_shape': (4, 6, 2, 3), 'axes': ([2, 3, 0], [0, 3, 2]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (2, 5, 3, 4), 'b_shape': (6, 4, 2, 3), 'axes': ([3, 2, 0], [1, 3, 2]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (2, 3, 5, 4), 'b_shape': (6, 2, 4, 3), 'axes': ([3, 1, 0], [2, 3, 1]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (3, 2, 5, 4), 'b_shape': (6, 2, 3, 4), 'axes': ([3, 0, 1], [3, 2, 1]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (3, 2, 4, 5), 'b_shape': (2, 6, 3, 4), 'axes': ([2, 0, 1], [3, 2, 0]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (3, 4, 2, 5), 'b_shape': (2, 3, 6, 4), 'axes': ([1, 0, 2], [3, 1, 0]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (4, 3, 2, 5), 'b_shape': (2, 3, 4, 6), 'axes': ([0, 1, 2], [2, 1, 0]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (4, 3, 5, 2), 'b_shape': (3, 2, 4, 6), 'axes': ([0, 1, 3], [2, 0, 1]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (4, 5, 3, 2), 'b_shape': (3, 4, 2, 6), 'axes': ([0, 2, 3], [1, 0, 2]), 'gc_shape': (5, 6)}, # NOQA
{'a_shape': (3, 2), 'b_shape': (2, 4), 'axes': 1, 'gc_shape': (3, 4)}, # NOQA
{'a_shape': (3, 2), 'b_shape': (2, 4), 'axes': (1, 0), 'gc_shape': (3, 4)}, # NOQA
{'a_shape': (3, 2), 'b_shape': (4, 2), 'axes': (1, 1), 'gc_shape': (3, 4)}, # NOQA
{'a_shape': (2, 3), 'b_shape': (4, 2), 'axes': (0, 1), 'gc_shape': (3, 4)}, # NOQA
{'a_shape': (2, 3), 'b_shape': (2, 4), 'axes': (0, 0), 'gc_shape': (3, 4)}, # NOQA
{'a_shape': (), 'b_shape': (), 'axes': 0, 'gc_shape': ()}, # NOQA
{'a_shape': (2), 'b_shape': (3), 'axes': 0, 'gc_shape': (2, 3)}, # NOQA
{'a_shape': (), 'b_shape': (2, 3), 'axes': 0, 'gc_shape': (2, 3)}, # NOQA
{'a_shape': (2, 3), 'b_shape': (), 'axes': 0, 'gc_shape': (2, 3)}, # NOQA
{'a_shape': (2, 3), 'b_shape': (4), 'axes': 0, 'gc_shape': (2, 3, 4)}, # NOQA
{'a_shape': (2), 'b_shape': (3, 4), 'axes': 0, 'gc_shape': (2, 3, 4)}, # NOQA
],
[
{'a_dtype': numpy.float16},
{'a_dtype': numpy.float32},
{'a_dtype': numpy.float64},
],
[
{'b_dtype': numpy.float16},
{'b_dtype': numpy.float32},
{'b_dtype': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
class TestTensorDot(testing.FunctionTestCase):
def setUp(self):
if self.a_dtype == numpy.float16 or self.b_dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 5e-2})
self.check_double_backward_options.update(
{'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
a = self._setup_tensor(.5, 1, self.a_shape, self.a_dtype)
b = self._setup_tensor(.5, 1, self.b_shape, self.b_dtype)
return a, b
def forward_expected(self, inputs):
a, b = inputs
y_expect = numpy.tensordot(a, b, self.axes)
return y_expect,
def forward(self, inputs, device):
a, b = inputs
y = F.tensordot(a, b, axes=self.axes)
return y,
def _setup_tensor(self, _min, _max, shape, dtype):
return numpy.random.uniform(_min, _max, shape).astype(dtype)
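# Axes semantics exercised above, mirroring numpy.tensordot: an integer ``n``
# contracts the last ``n`` axes of ``a`` with the first ``n`` axes of ``b``,
# while a pair of sequences names the contracted axes explicitly, e.g.
# (illustrative)
#     F.tensordot(a, b, axes=([1, 2], [0, 1]))  # sum over a's axes 1, 2 and b's 0, 1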
class TestTensorDotInvalid(unittest.TestCase):
def test_invalid_shape(self):
a_data = numpy.zeros((4, 3, 2), dtype=numpy.float32)
b_data = numpy.zeros((2, 3, 5), dtype=numpy.float32)
a = chainer.Variable(a_data)
b = chainer.Variable(b_data)
with self.assertRaises(ValueError):
F.tensordot(a, b)
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((1, 2), (0, 1)))
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((0), (0)))
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((2), (2)))
def test_invalid_axes(self):
a_data = numpy.zeros((4, 3, 2), dtype=numpy.float32)
b_data = numpy.zeros((3, 2, 5), dtype=numpy.float32)
a = chainer.Variable(a_data)
b = chainer.Variable(b_data)
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((1, 2), (0)))
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((2), (0, 1)))
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((0, 1, 2, 3), (0, 1, 2, 3)))
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=(()))
with self.assertRaises(ValueError):
F.tensordot(a, b, axes=((), (), ()))
with self.assertRaises(TypeError):
F.tensordot(a, b, axes=1.0)
testing.run_module(__name__, __file__)
| 8,068
| 50.724359
| 119
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_erfcinv.py
|
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _erfcinv_cpu(x, dtype):
from scipy import special
return numpy.vectorize(special.erfcinv, otypes=[dtype])(x)
def _erfcinv_gpu(x, dtype):
return cuda.to_gpu(_erfcinv_cpu(cuda.to_cpu(x), dtype))
def _erfcinv_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _erfcinv_cpu(x, dtype)
else:
return _erfcinv_gpu(x, dtype)
def make_data(shape, dtype):
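    # erfcinv is only defined on the open interval (0, 2) and diverges at
    # the endpoints, so sample x well inside that range.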
x = numpy.random.uniform(0.1, 1.9, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.erfcinv,
func_expected=_erfcinv_expected,
make_data=make_data,
forward_options={'atol': 1e-3, 'rtol': 1e-3},
backward_options={'eps': 1e-6},
double_backward_options={'eps': 1e-6}
)
@testing.with_requires('scipy')
class TestErfcinv(unittest.TestCase):
pass
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64]
}))
@testing.without_requires('scipy')
class TestErfcinvExceptions(unittest.TestCase):
def setUp(self):
self.x, self.gy, self.ggx = make_data(self.shape, self.dtype)
self.func = F.erfcinv
def check_forward(self, x_data):
x = chainer.Variable(x_data)
with self.assertRaises(ImportError):
self.func(x)
def test_forward_cpu(self):
self.check_forward(self.x)
testing.run_module(__name__, __file__)
| 1,693
| 23.911765
| 69
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_linear_interpolate.py
|
import numpy
from chainer import functions
from chainer import testing
from chainer import utils
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestLinearInterpolate(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({
'atol': 5e-4, 'rtol': 5e-3})
self.check_double_backward_options.update({
'atol': 5e-3, 'rtol': 5e-2})
def generate_inputs(self):
p = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
y = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return p, x, y
def forward(self, inputs, device):
p, x, y = inputs
ret = functions.linear_interpolate(p, x, y)
ret = functions.cast(ret, numpy.float64)
return ret,
def forward_expected(self, inputs):
p, x, y = inputs
expected = p * x + (1 - p) * y
expected = utils.force_array(expected, dtype=numpy.float64)
return expected,
testing.run_module(__name__, __file__)
| 1,685
| 26.639344
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_matmul.py
|
import unittest
import numpy
import chainer
from chainer.backend import CpuDevice
import chainer.functions as F
from chainer import testing
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
# matmul
{'x1_shape': (2, 5), 'x2_shape': (5, 10),
'transa': False, 'transb': False},
{'x1_shape': (5, 2), 'x2_shape': (5, 10),
'transa': True, 'transb': False},
{'x1_shape': (2, 5), 'x2_shape': (10, 5),
'transa': False, 'transb': True},
{'x1_shape': (5, 2), 'x2_shape': (10, 5),
'transa': True, 'transb': True},
# vector
{'x1_shape': (5,), 'x2_shape': (5,),
'transa': True, 'transb': False},
{'x1_shape': (5,), 'x2_shape': (5,),
'transa': False, 'transb': True},
# matrix-vector
{'x1_shape': (5,), 'x2_shape': (5, 2),
'transa': False, 'transb': False},
{'x1_shape': (5,), 'x2_shape': (5, 2),
'transa': True, 'transb': False},
{'x1_shape': (5,), 'x2_shape': (2, 5),
'transa': False, 'transb': True},
{'x1_shape': (2, 5), 'x2_shape': (5,),
'transa': False, 'transb': False},
{'x1_shape': (5, 2), 'x2_shape': (5,),
'transa': True, 'transb': False},
{'x1_shape': (2, 5), 'x2_shape': (5,),
'transa': False, 'transb': True},
# batched matmul
{'x1_shape': (6, 2, 5), 'x2_shape': (6, 5, 10),
'transa': False, 'transb': False},
{'x1_shape': (6, 5, 2), 'x2_shape': (6, 5, 10),
'transa': True, 'transb': False},
{'x1_shape': (6, 2, 5), 'x2_shape': (6, 10, 5),
'transa': False, 'transb': True},
{'x1_shape': (6, 5, 2), 'x2_shape': (6, 10, 5),
'transa': True, 'transb': True},
{'x1_shape': (2, 3, 4), 'x2_shape': (4,),
'transa': False, 'transb': False},
{'x1_shape': (4,), 'x2_shape': (2, 4, 3),
'transa': False, 'transb': False},
# batchsize = 1
{'x1_shape': (1, 2, 5), 'x2_shape': (1, 5, 10),
'transa': False, 'transb': False},
# 4dim batched matmul
{'x1_shape': (2, 3, 4, 5), 'x2_shape': (2, 3, 5, 6),
'transa': False, 'transb': False},
],
[
{'x1_dtype': numpy.float16},
{'x1_dtype': numpy.float32},
{'x1_dtype': numpy.float64},
],
[
{'x2_dtype': numpy.float16},
{'x2_dtype': numpy.float32},
{'x2_dtype': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
class TestMatMul(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-5}
if self.x1_dtype == numpy.float16 or self.x2_dtype == numpy.float16:
self.check_forward_options = {'atol': 2e-3, 'rtol': 2e-3}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
def generate_inputs(self):
        x1 = numpy.random.uniform(.5, 1, self.x1_shape)
x1 = x1.astype(self.x1_dtype)
x2 = numpy.random.uniform(.5, 1, self.x2_shape)
x2 = x2.astype(self.x2_dtype)
return x1, x2
def forward_expected(self, inputs):
x1, x2 = inputs
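        # Emulate transa/transb by swapping the last two axes, then use
        # numpy.dot for operands of at most 2 dimensions and a batched
        # einsum otherwise.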
if self.transa and x1.ndim >= 2:
x1 = x1.swapaxes(-1, -2)
if self.transb and x2.ndim >= 2:
x2 = x2.swapaxes(-1, -2)
if x1.ndim <= 2 or x2.ndim <= 2:
y = numpy.dot(x1, x2)
device = CpuDevice()
y = device.send(y)
else:
y = numpy.einsum('...ij,...jk->...ik', x1, x2)
return y,
def forward(self, inputs, device):
x1, x2 = inputs
y = F.matmul(x1, x2, transa=self.transa, transb=self.transb)
return y,
@testing.parameterize(*testing.product_dict(
[
# batched matmul 2d x 2d
{'x1_shape': (2, 3), 'x2_shape': (2, 3),
'transa': True, 'transb': False},
{'x1_shape': (2, 3), 'x2_shape': (2, 3),
'transa': False, 'transb': True},
# batched matmul 3d x 3d
{'x1_shape': (3, 2, 5), 'x2_shape': (3, 5, 4),
'transa': False, 'transb': False},
{'x1_shape': (3, 5, 2), 'x2_shape': (3, 5, 4),
'transa': True, 'transb': False},
{'x1_shape': (3, 2, 5), 'x2_shape': (3, 4, 5),
'transa': False, 'transb': True},
{'x1_shape': (3, 5, 2), 'x2_shape': (3, 4, 5),
'transa': True, 'transb': True},
# batched matmul 2d x 3d
{'x1_shape': (3, 5), 'x2_shape': (3, 1, 4),
'transa': False, 'transb': False},
{'x1_shape': (3, 5), 'x2_shape': (3, 5, 4),
'transa': True, 'transb': False},
{'x1_shape': (3, 5), 'x2_shape': (3, 4, 1),
'transa': False, 'transb': True},
{'x1_shape': (3, 5), 'x2_shape': (3, 4, 5),
'transa': True, 'transb': True},
# batched matmul 3d x 2d
{'x1_shape': (3, 2, 5), 'x2_shape': (3, 5),
'transa': False, 'transb': False},
{'x1_shape': (3, 5, 2), 'x2_shape': (3, 5),
'transa': True, 'transb': False},
{'x1_shape': (3, 2, 1), 'x2_shape': (3, 5),
'transa': False, 'transb': True},
{'x1_shape': (3, 1, 2), 'x2_shape': (3, 5),
'transa': True, 'transb': True},
# batchsize = 1
{'x1_shape': (1, 2, 5), 'x2_shape': (1, 5, 4),
'transa': False, 'transb': False},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
class TestBatchMatMul(testing.FunctionTestCase):
x1_dtype = numpy.float32
x2_dtype = numpy.float32
def generate_inputs(self):
x1 = numpy.random.uniform(.5, 1, self.x1_shape)
x1 = x1.astype(self.x1_dtype)
x2 = numpy.random.uniform(.5, 1, self.x2_shape)
x2 = x2.astype(self.x2_dtype)
return x1, x2
def forward_expected(self, inputs):
x1, x2 = inputs
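        # The reference flattens everything after the first two axes into a
        # single axis (so 2-d inputs become batches of column vectors)
        # before taking the batched matrix product.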
x1 = x1.reshape(x1.shape[:2] + (-1,))
if self.transa:
x1 = x1.swapaxes(-1, -2)
x2 = x2.reshape(x2.shape[:2] + (-1,))
if self.transb:
x2 = x2.swapaxes(-1, -2)
y_expect = numpy.einsum('...ij,...jk->...ik', x1, x2)
return y_expect,
def forward(self, inputs, device):
x1, x2 = inputs
with testing.assert_warns(DeprecationWarning):
y = F.batch_matmul(
x1, x2, transa=self.transa, transb=self.transb)
return y,
class TestMatMulInvalid(unittest.TestCase):
def test_invalid_shape(self):
x_data = numpy.zeros((2, 3, 4), dtype=numpy.float32)
y_data = numpy.zeros((3, 4, 3), dtype=numpy.float32)
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
with self.assertRaises(type_check.InvalidType):
F.matmul(x, y)
def test_invalid_ndim(self):
x_data = numpy.zeros((3, 2, 5), dtype=numpy.float32)
y_data = numpy.zeros((3, 5), dtype=numpy.float32)
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
with self.assertRaises(type_check.InvalidType):
F.matmul(x, y)
testing.run_module(__name__, __file__)
| 7,773
| 30.860656
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_ceil.py
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestCeil(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
def generate_inputs(self):
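        # Resample until no element is within 1e-2 of an integer, where ceil
        # is discontinuous, so rounding errors cannot change the expected
        # output.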
while True:
x = numpy.random.uniform(
-10.0, 10.0, self.shape).astype(self.dtype)
if (numpy.abs(x - numpy.round(x)) > 1e-2).all():
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.ceil(x)
return y,
def forward_expected(self, inputs):
x, = inputs
expected = numpy.ceil(x)
expected = numpy.asarray(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,220
| 21.611111
| 60
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_erf.py
|
import math
import unittest
import numpy
from chainer import backend
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
def _erf_cpu(x, dtype):
return numpy.vectorize(math.erf, otypes=[dtype])(x)
def _erf_gpu(x, dtype):
return cuda.to_gpu(_erf_cpu(cuda.to_cpu(x), dtype))
def _erf_expected(x, dtype):
if backend.get_array_module(x) is numpy:
return _erf_cpu(x, dtype)
else:
return _erf_gpu(x, dtype)
@testing.unary_math_function_unittest(
F.erf,
func_expected=_erf_expected,
)
class TestErf(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| 652
| 17.138889
| 55
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_zeta.py
|
import unittest
import numpy
import chainer
import chainer.functions as F
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(), (3, 2)],
'x_range': [(1.1, 2), (2, 50)],
'q_range': [(1.1, 2), (2, 50)],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1']
})
)
@testing.with_requires('scipy')
class TestZeta(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
low, high = self.x_range
self._x = numpy.random.uniform(
low=low, high=high, size=self.shape).astype(self.dtype)
def generate_inputs(self):
low, high = self.q_range
q = numpy.random.uniform(
low=low, high=high, size=self.shape).astype(self.dtype)
return q,
def forward_expected(self, inputs):
q, = inputs
        import scipy.special
y_expect = scipy.special.zeta(self._x, q)
return numpy.array(y_expect, dtype=self.dtype),
def forward(self, inputs, device):
q, = inputs
y = F.zeta(device.send(self._x.astype(q.dtype)), q)
return y,
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(), (3, 2)]
}))
@testing.without_requires('scipy')
class TestZetaExceptions(unittest.TestCase):
def setUp(self):
self._x = numpy.random.uniform(low=-5, high=1, size=self.shape).\
astype(self.dtype)
self.q = numpy.random.uniform(low=-5, high=1, size=self.shape).\
astype(self.dtype)
self.func = F.zeta
def check_forward(self, q_data):
q = chainer.Variable(q_data)
with self.assertRaises(ImportError):
self.func(q, self._x)
def test_zeta_forward_cpu(self):
self.check_forward(self.q)
testing.run_module(__name__, __file__)
| 2,622
| 27.824176
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_hyperbolic.py
|
import numpy
from chainer import functions
from chainer import testing
from chainer.utils import force_array
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'function_name': ['cosh', 'sinh'],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestCoshSinh(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options.update({'atol': 1e-7, 'rtol': 1e-7})
if self.dtype == numpy.float16:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-3, 'rtol': 1e-2})
else:
self.check_backward_options.update({
'atol': 1e-4, 'rtol': 1e-3})
self.check_double_backward_options.update({
'atol': 1e-4, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
function = getattr(functions, self.function_name)
return function(x),
def forward_expected(self, inputs):
x, = inputs
function = getattr(numpy, self.function_name)
expected = function(x)
expected = force_array(expected)
return expected,
testing.run_module(__name__, __file__)
| 1,743
| 26.25
| 71
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/math_tests/test_batch_l2_norm_squared.py
|
import unittest
import numpy as np
import six
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import type_check
def _as_two_dim(x):
if x.ndim == 2:
return x
return x.reshape((len(x), -1))
@testing.parameterize(*testing.product({
'dtype': [np.float16, np.float32, np.float64],
'shape': [(4, 3, 5), (4, 15)],
}))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
}))
class TestBatchL2NormSquared(testing.FunctionTestCase):
def setUp(self):
if self.dtype == np.float16:
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update({
'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.batch_l2_norm_squared(x),
def forward_expected(self, inputs):
x, = inputs
x_two_dim = _as_two_dim(x)
y_expect = np.empty(len(x), dtype=self.dtype)
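        # Reference value: the squared L2 norm of each flattened sample.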
for n in six.moves.range(len(x)):
            y_expect[n] = sum(xi * xi for xi in x_two_dim[n])
return y_expect,
class TestBatchL2NormSquaredTypeError(unittest.TestCase):
def test_invalid_shape(self):
x = chainer.Variable(np.zeros((4,), dtype=np.float32))
with self.assertRaises(type_check.InvalidType):
functions.batch_l2_norm_squared(x)
testing.run_module(__name__, __file__)
| 1,924
| 24.666667
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_batch_renormalization.py
|
import numpy
import six
import chainer
from chainer.functions.normalization import batch_renormalization
from chainer import testing
import chainerx
# naive implementation of differentiable batch renormalization
def _naive_batch_renormalization(
x, gamma, beta, # variables
mean, var, # variables
running_mean, running_var, # arrays
rmax, dmax, eps, decay):
# If decay is not None, the running stats are updated.
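    # Batch renormalization computes
    #   xhat = (x - mean) / std * r + d,    y = gamma * xhat + beta
    # where r = clip(std / running_std, 1/rmax, rmax) and
    # d = clip((mean - running_mean) / running_std, -dmax, dmax), with both
    # r and d treated as constants with respect to the gradient.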
F = chainer.functions
assert isinstance(x, chainer.Variable)
assert isinstance(gamma, chainer.Variable)
assert isinstance(beta, chainer.Variable)
assert isinstance(mean, chainer.Variable)
assert isinstance(var, chainer.Variable)
assert isinstance(running_mean, chainer.get_array_types())
assert isinstance(running_var, chainer.get_array_types())
assert mean.shape == var.shape
assert mean.shape == running_mean.shape
assert mean.shape == running_var.shape
assert mean.shape == gamma.shape
dt = x.dtype.type
std = F.sqrt(var + dt(eps))
# r and d are gradient-stopped
running_std = numpy.sqrt(running_var + dt(eps))
r = (std.array / running_std).clip(1. / rmax, rmax)
d = ((mean.array - running_mean) / running_std).clip(-dmax, dmax)
xhat = (x - mean) / std * r + d
y = gamma * xhat + beta
# Update running stats
if decay is not None:
running_mean *= decay
running_mean += mean.array * dt(1. - decay)
# unbiased estimation
m = x.size // gamma.size
adjust = m / max(m - 1., 1.)
running_var *= decay
running_var += var.array * dt((1. - decay) * adjust)
return y
def parameterize_batch_renormalization():
return testing.parameterize(*(testing.product({
'ndim': [0, 1, 2],
'eps': [2e-5, 1e-1],
'dtype': [numpy.float32],
'update_statistics': [True, False],
}) + testing.product({
'ndim': [1],
'eps': [2e-5, 1e-1],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'update_statistics': [True, False],
})))
def inject_backend_tests_batch_renormalization():
return testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ [
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@parameterize_batch_renormalization()
@inject_backend_tests_batch_renormalization()
class TestBatchRenormalizationForward(testing.FunctionTestCase):
# As F.batch_renormalization includes a calculation in which the outcome
    # depends on x but is gradient-stopped w.r.t. x,
# gradient_check.check_backward cannot be used, because numerical
# gradients would not be calculated correctly.
# Instead of using gradient_check.check_backward, this test checks the
# backward gradients as a "forward" function.
# In addition, updated running_mean and running_var are also included in
# the outputs of the "forward" function.
skip_backward_test = True
skip_double_backward_test = True
rmax = 3
dmax = 5
def setUp(self):
shape = (5, 3) + (2,) * self.ndim
aggr_shape = (3,)
self.running_mean = (
numpy.random.uniform(-1, 1, aggr_shape).astype(self.dtype))
self.running_var = (
numpy.random.uniform(1e-3, 1, aggr_shape).astype(self.dtype))
axis = (0,) + tuple(six.moves.range(2, self.ndim + 2))
expander = (None, Ellipsis) + (None,) * self.ndim
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 0.02, 'rtol': 0.02}
self.shape = shape
self.aggr_shape = aggr_shape
self.axis = axis
self.expander = expander
def generate_inputs(self):
shape = self.shape
aggr_shape = self.aggr_shape
dtype = self.dtype
x = numpy.random.uniform(-10, 10, shape).astype(dtype)
gamma = numpy.random.uniform(.5, 1, aggr_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, aggr_shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gamma, beta, gy
def _compute_backward(self, x, gamma, beta, y, gy):
assert isinstance(x, chainer.Variable)
assert isinstance(gamma, chainer.Variable)
assert isinstance(beta, chainer.Variable)
assert isinstance(y, chainer.Variable)
assert isinstance(gy, chainer.Variable)
if x.xp is chainerx:
# TODO(niboshi): ChainerX does not support grad yet
y.grad = gy.array.copy()
y.backward()
gx = x.grad_var
ggamma = gamma.grad_var
gbeta = beta.grad_var
else:
gx, ggamma, gbeta = chainer.grad([y], [x, gamma, beta], [gy])
return gx.array, ggamma.array, gbeta.array
def forward(self, inputs, device):
x, gamma, beta, gy = inputs
running_mean = device.send(self.running_mean.copy())
running_var = device.send(self.running_var.copy())
y = batch_renormalization.batch_renormalization(
x, gamma, beta,
self.rmax, self.dmax,
eps=self.eps,
running_mean=running_mean,
running_var=running_var,
update_statistics=self.update_statistics)
# backward gradients
gx, ggamma, gbeta = self._compute_backward(x, gamma, beta, y, gy)
return (
y,
chainer.Variable(running_mean),
chainer.Variable(running_var),
chainer.Variable(gx),
chainer.Variable(ggamma),
chainer.Variable(gbeta),
)
def forward_expected(self, inputs):
F = chainer.functions
expander = self.expander
axis = self.axis
if self.update_statistics:
            decay = 0.9  # default value of F.batch_renormalization
else:
decay = None
x_arr, gamma_arr, beta_arr, gy_arr = inputs
x = chainer.Variable(x_arr)
gamma = chainer.Variable(gamma_arr[expander])
beta = chainer.Variable(beta_arr[expander])
x_mean = F.mean(x, axis=axis, keepdims=True)
x_var = F.mean((x - x_mean) ** 2, axis=axis, keepdims=True)
running_mean = self.running_mean.copy()
running_var = self.running_var.copy()
y = _naive_batch_renormalization(
x, gamma, beta,
x_mean,
x_var,
running_mean[expander],
running_var[expander],
self.rmax, self.dmax, self.eps,
decay)
# backward gradients
gx, ggamma, gbeta = self._compute_backward(
x, gamma, beta, y, chainer.Variable(gy_arr))
ggamma = numpy.squeeze(ggamma, axis)
gbeta = numpy.squeeze(gbeta, axis)
return (
y.array,
running_mean, running_var,
gx, ggamma, gbeta)
@testing.parameterize(*testing.product({
'ndim': [0, 1, 2, 3],
'eps': [2e-5, 1e-1],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@inject_backend_tests_batch_renormalization()
class TestFixedBatchRenormalization(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-3, 'rtol': 1e-2})
self.check_backward_options.update({'atol': 5e-3, 'rtol': 1e-2})
self.check_double_backward_options.update(
{'atol': 5e-3, 'rtol': 1e-2})
def generate_inputs(self):
channels = 3
shape = (5, channels) + (2,) * self.ndim
dtype = self.dtype
x = numpy.random.uniform(-1, 1, shape).astype(dtype)
gamma = numpy.random.uniform(.5, 1, (channels,)).astype(dtype)
beta = numpy.random.uniform(-1, 1, (channels,)).astype(dtype)
mean = numpy.random.uniform(-1, 1, (channels,)).astype(dtype)
var = numpy.random.uniform(0.5, 1, (channels,)).astype(dtype)
return x, gamma, beta, mean, var
def forward(self, inputs, device):
x, gamma, beta, mean, var = inputs
with testing.assert_warns(DeprecationWarning):
y = batch_renormalization.fixed_batch_renormalization(
x, gamma, beta, mean, var, eps=self.eps)
return y,
def forward_expected(self, inputs):
expander = (None, Ellipsis) + (None,) * self.ndim
x_arr, gamma_arr, beta_arr, mean_arr, var_arr = inputs
x = chainer.Variable(x_arr)
gamma = chainer.Variable(gamma_arr[expander])
beta = chainer.Variable(beta_arr[expander])
y = _naive_batch_renormalization(
x, gamma, beta,
chainer.Variable(mean_arr[expander]),
chainer.Variable(var_arr[expander]),
mean_arr[expander], var_arr[expander],
1, 0, self.eps, None)
return y.array,
testing.run_module(__name__, __file__)
| 9,263
| 33.438662
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_l2_normalization.py
|
import functools
import itertools
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _skip_if(cond, reason):
"""Skip test if cond(self) is True"""
def decorator(impl):
@functools.wraps(impl)
def wrapper(self, *args, **kwargs):
if cond(self):
raise unittest.SkipTest(reason)
else:
impl(self, *args, **kwargs)
return wrapper
return decorator
def _is_good_param(param):
    # Check if the 'nonzeros' param is valid and meaningful. For the latter,
    # x should contain at least one zero if 'nonzeros' is given.
return param['nonzeros'] is None \
or param['nonzeros'] < numpy.prod(param['shape'])
@testing.parameterize(*filter(_is_good_param, testing.product([
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[
{'shape': (4, 15), 'axis': 1},
{'shape': (4,), 'axis': 0},
{'shape': (4, 3, 2, 5), 'axis': 0},
{'shape': (4, 3, 2, 5), 'axis': 1},
{'shape': (4, 3, 2, 5), 'axis': 2},
{'shape': (4, 3, 2, 5), 'axis': 3},
{'shape': (4, 3, 2), 'axis': (0, 1)},
{'shape': (4, 3, 2, 4, 3, 2, 2), 'axis': (1, 4, 3, 6)},
{'shape': (0, 2), 'axis': 1},
{'shape': (), 'axis': ()},
],
[
# nonzeros (optional int): number of nonzero elems in input
# truezero (bool): flag whether zero elems are exactly zero. If false,
# randomly-chosen small values are used.
{'eps': 1e-5, 'nonzeros': None},
{'eps': 1e-1, 'nonzeros': None},
{'eps': 1e-1, 'nonzeros': 0, 'truezero': True},
{'eps': 1e-1, 'nonzeros': 0, 'truezero': False},
{'eps': 1e-1, 'nonzeros': 2, 'truezero': True},
{'eps': 1e-1, 'nonzeros': 2, 'truezero': False},
],
])))
class TestL2Normalization(unittest.TestCase):
def setUp(self):
min_abs = 0.1
if self.dtype == numpy.float16:
tuple_axis = self.axis
if not isinstance(tuple_axis, tuple):
tuple_axis = (tuple_axis,)
aggr_size = numpy.prod(
[self.shape[i] for i in tuple_axis], dtype=int)
min_abs = max(min_abs, 0.5 / aggr_size)
self.x = chainer.utils.force_array(
numpy.random.uniform(min_abs, 1, self.shape)
* (1 - 2 * numpy.random.randint(2, size=self.shape)),
self.dtype)
if self.nonzeros is not None:
# Make self.x have limited number of large values
# get mask of indices to modify at
zeros = self.x.size - self.nonzeros
while True:
rand = numpy.random.uniform(0, 1, self.shape)
mask = rand <= numpy.sort(rand.ravel())[zeros - 1]
if self.x[mask].shape == (zeros,):
break
# set zeros or small values to a part of the input
if self.truezero:
self.x[mask] = 0
else:
zero_scale = 10. ** numpy.random.randint(-40, -3)
self.x[mask] = numpy.random.uniform(
-zero_scale, zero_scale, zeros)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
else:
self.check_forward_options = {}
if self.nonzeros is None:
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 5e-3, 'rtol': 5e-3}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
else:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-4}
else:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2,
'eps': 1e-4}
def check_forward(self, x_data, axis):
eps = self.eps
x = chainer.Variable(x_data)
y = functions.normalize(x, eps=eps, axis=axis)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
y_expect = numpy.empty_like(self.x)
shape = self.x.shape
indices = []
axis_tuple = axis if isinstance(axis, tuple) else (axis,)
for i in six.moves.range(len(shape)):
if i not in axis_tuple:
indices.append(six.moves.range(shape[i]))
else:
indices.append([slice(None)])
indices_tuple = list(itertools.product(*indices))
for index in indices_tuple:
# Note: Casting back the result of `numpy.linalg.norm` to `x.dtype`
# because old NumPy casts it to float32 when a float16 value is
# given.
numerator = numpy.linalg.norm(self.x[index]).astype(x.dtype) + eps
y_expect[index] = self.x[index] / numerator
testing.assert_allclose(y_expect, y_data, **self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(self.x, self.axis)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), self.axis)
def check_backward(self, x_data, axis, y_grad):
def f(x):
return functions.normalize(x, eps=self.eps, axis=axis)
gradient_check.check_backward(
f, x_data, y_grad, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.axis, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy))
@_skip_if(
lambda self: self.nonzeros is not None,
'backward of L2Normalize is non-differentiable at zero vector')
def check_double_backward(self, x_data, axis, y_grad, x_grad_grad):
def f(x):
return functions.normalize(x, eps=self.eps, axis=axis)
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.axis, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), self.axis, cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
def check_eps(self, x_data):
x = chainer.Variable(x_data)
y = functions.normalize(x, axis=self.axis)
self.assertEqual(y.data.dtype, self.dtype)
y_data = cuda.to_cpu(y.data)
y_expect = numpy.zeros_like(self.x)
testing.assert_allclose(y_expect, y_data)
def test_eps_cpu(self):
self.check_eps(numpy.zeros_like(self.x))
@attr.gpu
def test_eps_gpu(self):
self.check_eps(cuda.to_gpu(numpy.zeros_like(self.x)))
testing.run_module(__name__, __file__)
| 7,483
| 34.469194
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_batch_normalization.py
|
import unittest
import warnings
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
def _as_noncontiguous_array(array):
# TODO(niboshi): cupy + cudnn test fails in F.fixed_batch_normalization.
# Fix it and use testing.array._as_noncontiguous_array.
def as_noncontiguous_array(arr):
if arr is None:
return None
if isinstance(arr, (numpy.ndarray, cuda.ndarray)):
xp = chainer.backend.get_array_module(arr)
return xp.asfortranarray(arr)
return testing.array._as_noncontiguous_array(arr)
if isinstance(array, (list, tuple)):
return type(array)([as_noncontiguous_array(arr) for arr in array])
return as_noncontiguous_array(array)
def _batch_normalization(
inputs, running_mean=None, running_var=None, decay=None):
x, gamma, beta, mean, var, eps, expander = inputs
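    # Standard batch normalization:
    #   y = gamma * (x - mean) / sqrt(var + eps) + beta
    # If running statistics are given, they are updated by exponential
    # decay, using the unbiased variance estimate for running_var.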
mean_expanded = mean[expander]
std = numpy.sqrt(var + eps)[expander]
y_expect = (gamma[expander] * (x - mean_expanded) / std + beta[expander])
if running_mean is not None or running_var is not None:
m = x.size // gamma.size
adjust = m / max(m - 1., 1.) # unbiased estimation
if running_mean is not None:
running_mean *= decay
running_mean += (1 - decay) * mean
if running_var is not None:
running_var *= decay
running_var += (1 - decay) * adjust * var
return y_expect
@testing.parameterize(*(testing.product_dict(
testing.product({
'param_shape': [(3,), (3, 4), (3, 2, 3)],
'ndim': [0, 1, 2],
}) + [
{'input_shape': (5, 4, 3, 2), 'axis': (0, 2, 3)},
{'input_shape': (5, 4), 'axis': 0},
{'input_shape': (5, 4, 3), 'axis': (0, 1)},
],
testing.product({
'xdtype': [numpy.float32, numpy.float64],
'dtype': [numpy.float32, numpy.float64],
'eps': [2e-5, 5e-1],
'c_contiguous': [True, False],
'running_statistics': [True, False],
}),
) + testing.product({
'param_shape': [(3,)],
'ndim': [1],
'eps': [2e-5, 5e-1],
'xdtype': [numpy.float16, numpy.float32, numpy.float64],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'c_contiguous': [True, False],
'running_statistics': [True, False],
})))
@backend.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cudnn_fast_batch_normalization': [True, False],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestBatchNormalization(unittest.TestCase):
def setUp(self):
dtype = self.dtype
xdtype = self.xdtype
if not hasattr(self, 'axis'):
param_shape = self.param_shape
ndim = self.ndim
shape = (5,) + param_shape + (2,) * ndim
else:
aggr_axes = self.axis
if isinstance(self.axis, int):
aggr_axes = self.axis,
param_shape = tuple(
s
for i, s in enumerate(self.input_shape)
if i not in aggr_axes
)
shape = self.input_shape
# x, ggx, gy must share the same data type
# gamma, beta, gggamma, ggbeta must share the same data type
gamma = numpy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
x = numpy.random.uniform(-1, 1, shape).astype(xdtype)
gy = numpy.random.uniform(-1, 1, shape).astype(xdtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(xdtype)
gggamma = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggbeta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
if self.running_statistics:
self.running_mean = numpy.random.uniform(
-1, 1, param_shape).astype(dtype)
self.running_var = numpy.random.uniform(
-1, 1, param_shape).astype(dtype)
else:
self.running_mean = None
self.running_var = None
if not hasattr(self, 'axis'):
head_ndim = gamma.ndim + 1
aggr_axes = (0,) + tuple(six.moves.range(head_ndim, x.ndim))
self.expander = (None, Ellipsis) + (None,) * ndim
else:
self.expander = tuple(
None if i in aggr_axes else slice(None)
for i in range(x.ndim)
)
mean = x.mean(axis=aggr_axes)
var = x.var(axis=aggr_axes)
self.decay = 0.9
self.mean = mean
self.var = var
self.inputs = [x, gamma, beta]
self.grad_outputs = [gy]
self.grad_grad_inputs = [ggx, gggamma, ggbeta]
self.bn_options = {
'decay': self.decay,
'eps': self.eps,
}
if hasattr(self, 'axis'):
self.bn_options['axis'] = self.axis
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-4, 'rtol': 1e-3}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
if self.xdtype == numpy.float16 or self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
def forward_cpu(self, inputs, running_mean, running_var):
y_expect = _batch_normalization(
inputs + [self.mean, self.var, self.eps, self.expander],
running_mean, running_var, self.decay)
return y_expect,
def check_forward(self, inputs, backend_config):
if self.running_statistics:
running_mean_expected = self.running_mean.copy()
running_var_expected = self.running_var.copy()
else:
running_mean_expected = None
running_var_expected = None
y_expected, = self.forward_cpu(
inputs, running_mean_expected, running_var_expected)
inputs = backend_config.get_array(inputs)
running_mean = backend_config.get_array(self.running_mean)
running_var = backend_config.get_array(self.running_var)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
running_mean = _as_noncontiguous_array(running_mean)
running_var = _as_noncontiguous_array(running_var)
with backend_config:
y = functions.batch_normalization(
*inputs, running_mean=running_mean,
running_var=running_var, **self.bn_options)
assert y.data.dtype == self.xdtype
testing.assert_allclose(
y_expected, y.data, **self.check_forward_options)
if self.running_statistics:
testing.assert_allclose(
running_mean_expected, running_mean,
**self.check_forward_options)
testing.assert_allclose(
running_var_expected, running_var,
**self.check_forward_options)
def test_forward(self, backend_config):
self.check_forward(self.inputs, backend_config)
def check_backward(self, inputs, grad_outputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
def f(*inputs):
y = functions.batch_normalization(
*inputs, **self.bn_options)
return y,
with backend_config:
gradient_check.check_backward(
f, inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
def check_double_backward(
self, inputs, grad_outputs, grad_grad_inputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
grad_grad_inputs = backend_config.get_array(grad_grad_inputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
grad_grad_inputs = _as_noncontiguous_array(grad_grad_inputs)
def f(*inputs):
return functions.batch_normalization(
*inputs, **self.bn_options)
with backend_config:
gradient_check.check_double_backward(
f, inputs, grad_outputs, grad_grad_inputs,
**self.check_double_backward_options)
def test_double_backward(self, backend_config):
self.check_double_backward(
self.inputs, self.grad_outputs, self.grad_grad_inputs,
backend_config)
@testing.parameterize(*(testing.product({
'param_shape': [(3,), (3, 4), (3, 2, 3)],
'ndim': [0, 1, 2],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float32],
'c_contiguous': [True, False],
}) + testing.product({
'param_shape': [(3,)],
'ndim': [1],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'c_contiguous': [True, False],
})))
@backend.inject_backend_tests(
None,
# CPU tests
[{'use_cuda': False}]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cudnn_fast_batch_normalization': [True, False],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
class TestFixedBatchNormalization(unittest.TestCase):
def setUp(self):
param_shape = self.param_shape
dtype = self.dtype
ndim = self.ndim
gamma = numpy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
shape = (5,) + param_shape + (2,) * ndim
x = numpy.random.uniform(-1, 1, shape).astype(dtype)
mean = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
var = numpy.random.uniform(0.5, 1, param_shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
gggamma = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggbeta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggmean = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
ggvar = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
self.decay = 0.0
self.expander = (None, Ellipsis) + (None,) * ndim
self.inputs = [x, gamma, beta, mean, var]
self.grad_outputs = [gy]
self.grad_grad_inputs = [ggx, gggamma, ggbeta, ggmean, ggvar]
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'dtype': numpy.float64}
self.check_double_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
def forward_cpu(self, inputs):
y_expect = _batch_normalization(inputs + [self.eps, self.expander])
return y_expect,
def check_forward(self, inputs, enable_backprop, backend_config):
y_expected, = self.forward_cpu(inputs)
inputs = backend_config.get_array(inputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
with chainer.using_config('enable_backprop', enable_backprop):
with backend_config:
y = functions.fixed_batch_normalization(*inputs, eps=self.eps)
assert y.data.dtype == self.dtype
testing.assert_allclose(
y_expected, y.data, **self.check_forward_options)
def test_forward(self, backend_config):
self.check_forward(self.inputs, False, backend_config)
def test_forward_with_enable_backprop(self, backend_config):
self.check_forward(self.inputs, True, backend_config)
def check_backward(self, inputs, grad_outputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
def f(*inputs):
y = functions.fixed_batch_normalization(*inputs, eps=self.eps)
return y,
with backend_config:
gradient_check.check_backward(
f, inputs, grad_outputs,
**self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
def check_double_backward(
self, inputs, grad_outputs, grad_grad_inputs, backend_config):
inputs = backend_config.get_array(inputs)
grad_outputs = backend_config.get_array(grad_outputs)
grad_grad_inputs = backend_config.get_array(grad_grad_inputs)
if not self.c_contiguous:
with backend_config:
inputs = _as_noncontiguous_array(inputs)
grad_outputs = _as_noncontiguous_array(grad_outputs)
grad_grad_inputs = _as_noncontiguous_array(grad_grad_inputs)
def f(*inputs):
return functions.fixed_batch_normalization(*inputs, eps=self.eps)
with backend_config:
gradient_check.check_double_backward(
f, inputs, grad_outputs, grad_grad_inputs,
**self.check_double_backward_options)
def test_double_backward(self, backend_config):
self.check_double_backward(
self.inputs, self.grad_outputs, self.grad_grad_inputs,
backend_config)
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'eps': [2e-5, 5e-1],
# TODO(bkvogel): Check float16 support again in next cuDNN version.
'dtype': [numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestBatchNormalizationCudnnCall(unittest.TestCase):
def setUp(self):
ndim = 0
param_shape = (3,)
self.gamma = cuda.cupy.random.uniform(.5, 1,
param_shape).astype(self.dtype)
self.beta = cuda.cupy.random.uniform(-1, 1,
param_shape).astype(self.dtype)
shape = (7,) + param_shape + (2,) * ndim
self.x = cuda.cupy.random.uniform(-1, 1, shape).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, shape).astype(self.dtype)
self.args = [self.x, self.gamma, self.beta]
head_ndim = self.gamma.ndim + 1
self.aggr_axes = (0,) + tuple(six.moves.range(head_ndim, self.x.ndim))
self.mean = self.x.mean(axis=self.aggr_axes)
self.var = self.x.var(axis=self.aggr_axes) + self.eps
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('>=auto', 5000)
def forward(self):
return functions.batch_normalization(
*[chainer.Variable(i) for i in self.args], eps=self.eps,
running_mean=self.mean, running_var=self.var)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch(
'cupy.cudnn.batch_normalization_forward_training_ex'
) as func:
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with testing.patch(
'cupy.cudnn.batch_normalization_backward'
) as func:
y.backward()
self.assertEqual(func.called, self.expect)
@attr.cudnn
class TestBatchNormalizationCudnnEps(unittest.TestCase):
def setUp(self):
ndim = 0
param_shape = (3,)
dtype = numpy.float32
gamma = cuda.cupy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
shape = (7,) + param_shape + (2,) * ndim
x = cuda.cupy.random.uniform(-1, 1, shape).astype(dtype)
self.args = [x, gamma, beta]
def test_valid(self):
functions.batch_normalization(*self.args, eps=1e-5)
def test_invalid(self):
eps = -0.1
if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
eps = 2e-6
with self.assertRaises(RuntimeError):
functions.batch_normalization(*self.args, eps=eps)
@attr.cudnn
class TestFixedBatchNormalizationCudnnEps(unittest.TestCase):
def setUp(self):
ndim = 0
param_shape = (3,)
dtype = numpy.float32
gamma = cuda.cupy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
mean = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
var = cuda.cupy.random.uniform(-1, 1, param_shape).astype(dtype)
shape = (7,) + param_shape + (2,) * ndim
x = cuda.cupy.random.uniform(-1, 1, shape).astype(dtype)
self.args = [x, gamma, beta, mean, var]
def test_valid(self):
functions.fixed_batch_normalization(*self.args, eps=1e-5)
def test_invalid(self):
eps = -0.1
if chainer.backends.cuda.libcudnn.get_build_version() < 7500:
eps = 2e-6
with self.assertRaises(RuntimeError):
functions.fixed_batch_normalization(*self.args, eps=eps)
class TestBatchNormalizationWarning(unittest.TestCase):
def setUp(self):
pass
def create_batch(self, param_shape, x_shape):
dtype = numpy.float32
gamma = numpy.random.uniform(.5, 1, param_shape).astype(dtype)
beta = numpy.random.uniform(-1, 1, param_shape).astype(dtype)
x = numpy.random.uniform(-1, 1, x_shape).astype(dtype)
args = [x, gamma, beta]
return args
def test_invalid_batch(self):
args = self.create_batch((3,), (1, 3))
with testing.assert_warns(UserWarning):
functions.batch_normalization(*args)
def test_invalid_batch_no_batch_axis(self):
args = self.create_batch((1, 3,), (1, 3, 1))
with testing.assert_warns(UserWarning):
functions.batch_normalization(*args, axis=2)
def test_valid_batch(self):
args = self.create_batch((3,), (1, 3, 2, 2))
with warnings.catch_warnings(record=True) as w:
functions.batch_normalization(*args)
assert len(w) == 0
def test_valid_batch_no_batch_axis(self):
args = self.create_batch((1, 3,), (1, 3, 2))
with warnings.catch_warnings(record=True) as w:
functions.batch_normalization(*args, axis=2)
assert len(w) == 0
testing.run_module(__name__, __file__)
| 20,347
| 36.681481
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_group_normalization.py
|
import numpy
import six
from chainer import functions
import chainer.functions.normalization.group_normalization as gn_module
from chainer import testing
def _simple_group_normalization(x, groups, gamma, beta, eps=1e-5):
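    # Reference implementation: split the channel axis into `groups` groups,
    # normalize each (sample, group) slice by its own mean and standard
    # deviation, then apply the per-channel affine transform (gamma, beta).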
batch_size, channels = x.shape[:2]
x_reshape = x.reshape(batch_size, groups, channels // groups, -1)
mean = numpy.mean(x_reshape, axis=(2, 3), keepdims=True)
var = numpy.var(x_reshape, axis=(2, 3), keepdims=True)
std = numpy.sqrt(var + eps, dtype=x.dtype)
x_hat = (x_reshape - mean) / std
x_hat = x_hat.reshape(x.shape)
for i in six.moves.xrange(x.ndim):
if i != 1: # except for channel dim
gamma = numpy.expand_dims(gamma, i)
beta = numpy.expand_dims(beta, i)
return x_hat * gamma + beta
@testing.parameterize(*(testing.product({
'shape': [(1, 4, 5, 3), (5, 4, 7), (3, 20)],
'groups': [1, 2, 4],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestGroupNormalization(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options.update({'atol': 1e-4, 'rtol': 1e-3})
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_backward_options.update({'atol': 1e-2, 'rtol': 1e-2})
self.check_double_backward_options.update(
{'atol': 1e-2, 'rtol': 1e-2})
def generate_inputs(self):
shape = self.shape
# sample x such that x.std >= min_std
min_std = 0.2 if self.dtype == numpy.float16 else 0.02
retry = 0
while True:
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
x_groups = x.reshape(shape[0], self.groups, -1)
if x_groups.std(axis=2).min() >= min_std:
break
retry += 1
assert retry <= 20, 'Too many retries to generate inputs'
gamma = numpy.random.uniform(-1, 1, shape[1]).astype(self.dtype)
beta = numpy.random.uniform(-1, 1, shape[1]).astype(self.dtype)
return x, gamma, beta
def forward(self, inputs, device):
x, gamma, beta = inputs
y = functions.group_normalization(x, self.groups, gamma, beta,
eps=self.eps)
return y,
def forward_expected(self, inputs):
x, gamma, beta = inputs
y = _simple_group_normalization(x, self.groups, gamma, beta,
eps=self.eps)
return y,
@testing.parameterize(*(testing.product({
'shape': [(15, 10)],
'dtype': [numpy.float32],
'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestMulInvStd(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
y = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x, y
def forward(self, inputs, device):
x, y = inputs
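        # _MulInvStd multiplies y by the inverse standard deviation of x
        # along axis 1 (cf. forward_expected below).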
mean = functions.mean(x, axis=1)
d = x - mean[:, None]
var = functions.mean(d * d, axis=1)
inv_std = functions.rsqrt(var + self.eps)
dummy_gamma = self.backend_config.xp.ones(
self.shape[0], dtype=self.dtype)
return gn_module._MulInvStd(
self.eps, mean.array, inv_std.array, dummy_gamma).apply((x, y))
def forward_expected(self, inputs):
x, y = inputs
inv_std = (numpy.var(x, axis=1) + self.eps) ** -0.5
z = inv_std[:, None] * y
return z,
testing.run_module(__name__, __file__)
| 4,610
| 30.367347
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_local_response_normalization.py
|
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import backend
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestLocalResponseNormalization(testing.FunctionTestCase):
def setUp(self):
self.skip_double_backward_test = True
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 5e-3, 'rtol': 5e-3}
else:
self.check_forward_options = {}
self.check_backward_options = {'atol': 3e-4, 'rtol': 3e-3}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (2, 7, 3, 2)).astype(self.dtype)
return x,
def forward_expected(self, inputs):
# Naive implementation
x, = inputs
y_expect = numpy.zeros_like(x)
for n, c, h, w in numpy.ndindex(x.shape):
s = 0
for i in six.moves.range(max(0, c - 2), min(7, c + 2)):
s += x[n, i, h, w] ** 2
denom = (2 + 1e-4 * s) ** .75
y_expect[n, c, h, w] = x[n, c, h, w] / denom
return y_expect,
def forward(self, inputs, device):
x, = inputs
y = functions.local_response_normalization(x)
return y,
testing.run_module(__name__, __file__)
| 1,845
| 27.84375
| 72
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_decorrelated_batch_normalization.py
|
import numpy
from chainer import functions
from chainer import testing
def _decorrelated_batch_normalization(x, mean, projection, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
assert projection.shape[0] == groups
ys = [
_decorrelated_batch_normalization_1group(xi, m, p)
for (xi, m, p) in zip(xs, mean, projection)]
return numpy.concatenate(ys, axis=1)
def _decorrelated_batch_normalization_1group(x, mean, projection):
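    # Whitening for a single group: flatten x to shape (C, N * spatial),
    # subtract the per-channel mean, apply the whitening projection, and
    # restore the original layout.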
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
y_hat = projection.dot(x_hat - mean[:, None])
y = y_hat.reshape((C, b) + x.shape[2:]).transpose(
(1, 0) + spatial_axis)
return y
def _calc_projection(x, mean, eps, groups):
xs = numpy.split(x, groups, axis=1)
assert mean.shape[0] == groups
projections = [
_calc_projection_1group(xi, m, eps)
for (xi, m) in zip(xs, mean)]
return numpy.concatenate([p[None] for p in projections])
def _calc_projection_1group(x, mean, eps):
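    # The whitening matrix is the inverse square root of the
    # eps-regularized covariance, computed via its eigendecomposition:
    #   P = U diag(eigvals ** -0.5) U.T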
spatial_ndim = len(x.shape[2:])
spatial_axis = tuple(range(2, 2 + spatial_ndim))
b, C = x.shape[:2]
m = b
for i in spatial_axis:
m *= x.shape[i]
x_hat = x.transpose((1, 0) + spatial_axis).reshape(C, -1)
mean = x_hat.mean(axis=1)
x_hat = x_hat - mean[:, None]
cov = x_hat.dot(x_hat.T) / m + eps * numpy.eye(C, dtype=x.dtype)
eigvals, eigvectors = numpy.linalg.eigh(cov)
projection = eigvectors.dot(numpy.diag(eigvals ** -0.5)).dot(eigvectors.T)
return projection
def _calc_mean(x, groups):
axis = (0,) + tuple(range(2, x.ndim))
return x.mean(axis=axis).reshape(groups, -1)
@testing.parameterize(*(testing.product({
'n_channels': [8],
'ndim': [0, 2],
'groups': [1, 2],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float32],
'contiguous': ['C', None],
}) + testing.product({
'n_channels': [8],
'ndim': [1],
'groups': [1, 2],
'eps': [2e-5, 5e-1],
# NOTE(crcrpar): np.linalg.eigh does not support float16
'dtype': [numpy.float32, numpy.float64],
'contiguous': ['C', None],
})))
@testing.backend.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ [{'use_cuda': True}]
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0']
})
)
class TestDecorrelatedBatchNormalization(testing.FunctionTestCase):
# TODO(crcrpar): Delete this line once double backward of
# :func:`~chainer.functions.decorrelated_batch_normalization` is
# implemented.
skip_double_backward_test = True
def setUp(self):
check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
check_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
if self.dtype == numpy.float32:
check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_forward_options = check_forward_options
self.check_backward_options = check_backward_options
def generate_inputs(self):
dtype = self.dtype
ndim = self.ndim
shape = (5, self.n_channels) + (2,) * ndim
m = 5 * 2 ** ndim
# NOTE(kataoka): The current implementation uses linalg.eigh. Small
# eigenvalues of the correlation matrix, which can be as small as
# eps=2e-5, cannot be computed with good *relative* accuracy, but
        # the eigenvalues are used later as `eigvals ** -0.5`. Require that
        # the following be sufficiently large:
# min(eigvals[:k]) == min(singular_vals ** 2 / m + eps)
min_singular_value = 0.1
# NOTE(kataoka): Decorrelated batch normalization should be free from
# "stochastic axis swapping". Requiring a gap between singular values
# just hides mistakes in implementations.
min_singular_value_gap = 0.001
g = self.groups
zca_shape = g, self.n_channels // g, m
x = numpy.random.uniform(-1, 1, zca_shape)
mean = x.mean(axis=2, keepdims=True)
a = x - mean
u, s, vh = numpy.linalg.svd(a, full_matrices=False)
# Decrement the latter dim because of the constraint `sum(_) == 0`
k = min(zca_shape[1], zca_shape[2] - 1)
s[:, :k] += (
min_singular_value
+ min_singular_value_gap * numpy.arange(k)
)[::-1]
a = numpy.einsum('bij,bj,bjk->bik', u, s, vh)
x = a + mean
x = x.reshape((self.n_channels, shape[0]) + shape[2:]).swapaxes(0, 1)
x = x.astype(dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.decorrelated_batch_normalization(
x, groups=self.groups, eps=self.eps),
def forward_expected(self, inputs):
x, = inputs
groups = self.groups
mean = _calc_mean(x, groups)
projection = _calc_projection(x, mean, self.eps, groups)
return _decorrelated_batch_normalization(
x, mean, projection, groups),
@testing.parameterize(*(testing.product({
'n_channels': [8],
'ndim': [0, 1, 2],
'groups': [1, 2],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float32],
'contiguous': ['C', None],
}) + testing.product({
'n_channels': [8],
'ndim': [1],
'groups': [1, 2],
'eps': [2e-5, 5e-1],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'contiguous': ['C', None],
})))
@testing.backend.inject_backend_tests(
None,
# CPU tests
[{}]
# GPU tests
+ [{'use_cuda': True}]
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0'],
})
)
class TestFixedDecorrelatedBatchNormalization(testing.FunctionTestCase):
# TODO(crcrpar): Delete this line once double backward of
# :func:`~chainer.functions.fixed_decorrelated_batch_normalization` is
# implemented.
skip_double_backward_test = True
def setUp(self):
C = self.n_channels // self.groups
dtype = self.dtype
self.mean = numpy.random.uniform(
-1, 1, (self.groups, C)).astype(dtype)
self.projection = numpy.random.uniform(
0.5, 1, (self.groups, C, C)).astype(dtype)
check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
check_backward_options = {'atol': 1e-4, 'rtol': 1e-3}
if self.dtype == numpy.float16:
check_forward_options = {'atol': 1e-2, 'rtol': 1e-2}
check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
if self.dtype == numpy.float32:
check_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
self.check_forward_options = check_forward_options
self.check_backward_options = check_backward_options
def generate_inputs(self):
dtype = self.dtype
ndim = self.ndim
shape = (5, self.n_channels) + (2,) * ndim
x = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x,
def forward(self, inputs, device):
x, = inputs
mean = device.send_array(self.mean.copy())
projection = device.send_array(self.projection.copy())
return functions.fixed_decorrelated_batch_normalization(
x, mean, projection, groups=self.groups
),
def forward_expected(self, inputs):
x, = inputs
mean = self.mean.copy()
projection = self.projection.copy()
return _decorrelated_batch_normalization(
x, mean, projection, self.groups),
testing.run_module(__name__, __file__)
| 7,586 | 32.570796 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/normalization_tests/test_layer_normalization.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*(testing.product({
'batchsize': [1, 5],
'size': [10, 20],
'dtype': [numpy.float32],
'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestLayerNormalization(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
shape = self.batchsize, self.size
size = numpy.prod(shape) // shape[0]
x = numpy.random.uniform(-1, 1, shape).astype(self.dtype)
gamma = numpy.random.uniform(-1, 1, size).astype(self.dtype)
beta = numpy.random.uniform(-1, 1, size).astype(self.dtype)
return x, gamma, beta
def forward_expected(self, inputs):
x, gamma, beta = inputs
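        # Layer normalization normalizes each sample over its feature axis:
        #   y = gamma * (x - mean(x)) / sqrt(var(x) + eps) + beta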
mean = numpy.mean(x, axis=1, keepdims=True)
var = numpy.mean(numpy.square(x - mean), axis=1, keepdims=True)
std = numpy.sqrt(var + self.eps)
y_expected = (
numpy.expand_dims(gamma, axis=0) * (x - mean) / std
+ numpy.expand_dims(beta, axis=0))
return y_expected,
def forward(self, inputs, device):
x, gamma, beta = inputs
y = functions.layer_normalization(x, gamma, beta, eps=self.eps)
return y,
testing.run_module(__name__, __file__)
| 2,172 | 30.955882 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/normalization_tests/__init__.py | (empty) | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/noise_tests/test_zoneout.py |
import unittest
import mock
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
def _zoneout(h, x, creator):
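    # Zoneout keeps each unit of the previous state h with probability
    # `ratio` and takes the new value x otherwise; flag_h and flag_x are
    # assumed to be the complementary masks recorded on the creator node.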
h_next = h * creator.flag_h + x * creator.flag_x
return h_next
@testing.parameterize(
{'ratio': 1},
{'ratio': 0},
{'ratio': 0.5},
{'ratio': 0.25},
)
class TestZoneout(unittest.TestCase):
def setUp(self):
self.h = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.x = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.ggh = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
self.ggx = numpy.random.uniform(-1, 1, (3, 4)).astype(numpy.float32)
def check_forward(self, h_data, x_data):
h = chainer.Variable(h_data)
x = chainer.Variable(x_data)
h_next = functions.zoneout(h, x, self.ratio)
if self.ratio == 0:
h_next_expect = x_data
elif self.ratio == 1:
h_next_expect = h_data
else:
h_next_expect = _zoneout(h_data, x_data, h_next.creator)
testing.assert_allclose(h_next.data, h_next_expect)
def check_backward(self, h_data, x_data, y_grad):
h = chainer.Variable(h_data)
x = chainer.Variable(x_data)
y = functions.zoneout(h, x, self.ratio)
d = {'creator': y.creator}
y.grad = y_grad
y.backward()
def f():
creator = d['creator']
y = _zoneout(h_data, x_data, creator)
return y,
gh, gx, = gradient_check.numerical_grad(f, (h.data, x.data,),
(y_grad,))
testing.assert_allclose(gh, h.grad, atol=1e-3)
testing.assert_allclose(gx, x.grad, atol=1e-3)
def check_double_backward(
self, h_data, x_data, y_grad, h_grad_grad, x_grad_grad):
xp = backend.get_array_module(h_data)
flag_x = xp.random.rand(*x_data.shape)
def f(h, x):
# As forward computation is executed multiple times in
# check_double_backward, use a fixed flag.
xp_str = 'numpy' if xp is numpy else 'cupy'
with mock.patch(
'{}.random.rand'.format(xp_str),
return_value=flag_x) as mock_rand:
y = functions.zoneout(h, x, self.ratio)
mock_rand.assert_called_once_with(*x.shape)
return y
gradient_check.check_double_backward(
f, (h_data, x_data), y_grad, (h_grad_grad, x_grad_grad),
dtype=numpy.float64)
def test_forward_cpu(self):
self.check_forward(self.h, self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.h), cuda.to_gpu(self.x))
def test_backward_cpu(self):
self.check_backward(self.h, self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy))
def test_double_backward_cpu(self):
self.check_double_backward(self.h, self.x, self.gy, self.ggh, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.h),
cuda.to_gpu(self.x),
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggh),
cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
| 3,672 | 31.219298 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/noise_tests/test_gaussian.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import gradient_check
from chainer import testing
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'shape': [(3, 2), ()],
}))
@testing.backend.inject_backend_tests(
None,
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
# ChainerX
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestGaussian(unittest.TestCase):
def setUp(self):
shape = self.shape
dtype = self.dtype
self.m = numpy.random.uniform(-1, 1, shape).astype(dtype)
self.v = numpy.random.uniform(-1, 1, shape).astype(dtype)
self.gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
self.ggm = numpy.random.uniform(-1, 1, shape).astype(dtype)
self.ggv = numpy.random.uniform(-1, 1, shape).astype(dtype)
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
if self.dtype == numpy.float16:
self.check_backward_options['dtype'] = numpy.float64
self.check_double_backward_options['dtype'] = numpy.float64
def test_forward(self, backend_config):
m_data, v_data = backend_config.get_array((self.m, self.v))
m = chainer.Variable(m_data)
v = chainer.Variable(v_data)
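        # F.gaussian(m, v) uses the reparameterization trick: v is the log
        # variance, so the sample is m + exp(v / 2) * eps with eps ~ N(0, 1).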
# Call forward without eps and retrieve it
n1, eps = functions.gaussian(m, v, return_eps=True)
self.assertIsInstance(eps, backend_config.xp.ndarray)
self.assertEqual(n1.dtype, self.dtype)
self.assertEqual(n1.shape, m.shape)
self.assertEqual(eps.dtype, self.dtype)
self.assertEqual(eps.shape, m.shape)
# Call again with retrieved eps
n2 = functions.gaussian(m, v, eps=eps)
self.assertEqual(n2.dtype, self.dtype)
self.assertEqual(n2.shape, m.shape)
testing.assert_allclose(n1.array, n2.array)
def test_backward(self, backend_config):
m_data, v_data = backend_config.get_array((self.m, self.v))
y_grad = backend_config.get_array(self.gy)
eps = backend_config.get_array(
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype))
def f(m, v):
            # The numerical gradient may be computed in a more precise dtype
            # than the backward computation, so cast eps to reuse the same
            # noise in both.
eps_ = eps.astype(m.dtype)
return functions.gaussian(m, v, eps=eps_)
gradient_check.check_backward(
f, (m_data, v_data), y_grad, **self.check_backward_options)
def test_double_backward(self, backend_config):
m_data, v_data = backend_config.get_array((self.m, self.v))
y_grad = backend_config.get_array(self.gy)
m_grad_grad, v_grad_grad = (
backend_config.get_array((self.ggm, self.ggv)))
eps = backend_config.get_array(
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype))
def f(m, v):
            # The numerical gradient may be computed in a more precise dtype
            # than the backward computation, so cast eps to reuse the same
            # noise in both.
eps_ = eps.astype(m.dtype)
return functions.gaussian(m, v, eps=eps_)
gradient_check.check_double_backward(
f, (m_data, v_data), y_grad, (m_grad_grad, v_grad_grad),
**self.check_double_backward_options)
testing.run_module(__name__, __file__)
| 3,832 | 36.213592 | 76 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/noise_tests/test_dropout.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
{'dtype': numpy.float16, 'ratio': 0.1},
{'dtype': numpy.float32, 'ratio': 0.3},
{'dtype': numpy.float64, 'ratio': 0.5},
{'dtype': numpy.float64, 'ratio': 0.0},
)
@testing.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward',
'test_immutable'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
}))
class TestDropout(unittest.TestCase):
def setUp(self):
dtype = self.dtype
x = numpy.random.uniform(-1, 1, (2, 3)).astype(dtype)
gy = numpy.random.uniform(-1, 1, (2, 3)).astype(dtype)
ggx = numpy.random.uniform(-1, 1, (2, 3)).astype(dtype)
self.inputs = [x]
self.grad_outputs = [gy]
self.grad_grad_inputs = [ggx]
self.check_backward_options = {'dtype': numpy.float64}
self.check_double_backward_options = {'dtype': numpy.float64}
if self.dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-3, 'rtol': 1e-2}
def forward_cpu(self, inputs, ratio, mask):
x, = inputs
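        # The mask retrieved from the tested function is assumed to already
        # include the 1 / (1 - ratio) scaling of the kept units, so x * mask
        # reproduces the forward output exactly.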
if ratio == 0.0:
y_expected = x
else:
y_expected = x * mask
return y_expected,
def check_forward(self, inputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
with backend_config:
y = functions.dropout(*(inputs + [self.ratio]))
if backend_config.use_cudnn == 'always':
if self.ratio == 0.0:
y_expected, = inputs
testing.assert_allclose(y_expected, y.data)
else:
self.assertTrue(cuda.cupy.all(inputs[0] != y.data))
else:
            # The expected result reuses the mask that the tested forward
            # computation generated.
mask = y.creator.mask
y_expected, = self.forward_cpu(inputs, self.ratio, mask)
assert y.data.dtype == self.dtype
testing.assert_allclose(y_expected, y.data)
def test_forward(self, backend_config):
self.check_forward(self.inputs, backend_config)
def check_backward(self, inputs, grad_outputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
# Instantiate the function class directly in order to reuse the mask,
# because f will be called repeatedly.
dropout = functions.noise.dropout.Dropout(self.ratio)
def f(*inputs):
return dropout.apply(inputs)
with backend_config:
gradient_check.check_backward(
f, inputs, grad_outputs, **self.check_backward_options)
def test_backward(self, backend_config):
self.check_backward(self.inputs, self.grad_outputs, backend_config)
def check_double_backward(
self, inputs, grad_outputs, grad_grad_inputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
grad_outputs = cuda.to_gpu(grad_outputs)
grad_grad_inputs = cuda.to_gpu(grad_grad_inputs)
# Instantiate the function class directly in order to reuse the mask,
# because f will be called repeatedly.
dropout = functions.noise.dropout.Dropout(self.ratio)
def f(*inputs):
return dropout.apply(inputs)
with backend_config:
gradient_check.check_double_backward(
f, inputs, grad_outputs, grad_grad_inputs,
**self.check_double_backward_options)
def test_double_backward(self, backend_config):
self.check_double_backward(
self.inputs, self.grad_outputs, self.grad_grad_inputs,
backend_config)
def check_immutable(self, inputs, backend_config):
if backend_config.use_cuda:
inputs = cuda.to_gpu(inputs)
with backend_config:
dropout = functions.noise.dropout.Dropout(0.5)
y1, = dropout.apply(inputs)
y2, = dropout.apply(inputs)
testing.assert_allclose(y1.data, y2.data)
def test_immutable(self, backend_config):
self.check_immutable(self.inputs, backend_config)
@testing.parameterize(*testing.product({
'specify_mask': [True, False],
'train': [True, False],
}))
@testing.inject_backend_tests(
['test_forward'],
testing.product({
'use_ideep': ['never', 'always'],
})
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
})
)
class TestDropoutMask(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
self.mask = (numpy.random.uniform(-1, 1, (2, 3)) > 0).astype(
numpy.float32)
def _check(self, backend_config):
mask = self.mask if self.specify_mask else None
x, mask = backend_config.get_array((self.x, mask))
with chainer.using_config('train', self.train), backend_config:
out, out_mask = functions.dropout(
x, 0.5, mask=mask, return_mask=True)
if self.train:
assert isinstance(out_mask, type(out.array))
if mask is None:
assert out_mask.shape == out.array.shape
else:
assert out_mask is mask
else:
assert out_mask is None
with chainer.using_config('train', self.train):
out2 = functions.dropout(self.x, 0.5, mask=cuda.to_cpu(out_mask))
testing.assert_allclose(out.array, out2.array)
def test_forward(self, backend_config):
self._check(backend_config)
@testing.parameterize(*testing.product({
'use_cudnn': ['never', 'always'],
'dropout': [0, 0.5],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestDropoutCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
def forward(self):
return functions.dropout(chainer.Variable(self.x), self.dropout)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
with testing.patch(
'chainer.backends.cuda.get_cudnn_dropout_states') as func:
self.forward()
assert func.called == (self.use_cudnn == 'always')
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
with testing.patch(
'chainer.backends.cuda.get_cudnn_dropout_states') as func:
y.backward()
assert func.called == (self.use_cudnn == 'always')
testing.run_module(__name__, __file__)
| 7,443 | 32.836364 | 78 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/noise_tests/test_simplified_dropconnect.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float16, numpy.float32, numpy.float64],
'ratio': [0.0, 0.9],
'train': [True, False],
'use_batchwise_mask': [True, False],
}))
class TestSimplifiedDropconnect(unittest.TestCase):
def setUp(self):
self.W = numpy.random.uniform(
-1, 1, (2, 3)).astype(self.W_dtype)
self.b = numpy.random.uniform(
-1, 1, 2).astype(self.x_dtype)
self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(self.x_dtype)
self.gy = numpy.random.uniform(-1, 1, (4, 2)).astype(self.x_dtype)
self.ggW = numpy.random.uniform(
-1, 1, (2, 3)).astype(self.W_dtype)
self.ggb = numpy.random.uniform(
-1, 1, 2).astype(self.x_dtype)
self.ggx = numpy.random.uniform(-1, 1, (4, 3)).astype(self.x_dtype)
self.y = self.x.dot(self.W.T) + self.b
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-2}
if self.x_dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-2}
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 5e-2, 'rtol': 5e-2}
elif self.W_dtype == numpy.float16:
self.check_backward_options = {
'dtype': numpy.float64, 'atol': 1e-2, 'rtol': 1e-2}
self.check_double_backward_options = {
'dtype': numpy.float64, 'atol': 1e-1, 'rtol': 1e-1}
def check_forward(self, x_data, W_data, b_data):
        # Check only the data type; y itself is tested by the
        # SimplifiedDropconnect link test.
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
if b_data is None:
y = functions.simplified_dropconnect(x, W, None,
self.ratio, self.train, None,
self.use_batchwise_mask)
else:
b = chainer.Variable(b_data)
y = functions.simplified_dropconnect(x, W, b,
self.ratio, self.train, None,
self.use_batchwise_mask)
self.assertEqual(y.data.dtype, self.x_dtype)
mask = y.creator.mask
mask = cuda.to_cpu(mask)
if self.use_batchwise_mask:
self.assertEqual(mask.shape, (x.shape[0],) + W.shape)
else:
self.assertEqual(mask.shape, W.shape)
def test_forward_cpu(self):
self.check_forward(self.x, self.W, self.b)
def test_forward_cpu_nobias(self):
self.check_forward(self.x, self.W, None)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), cuda.to_gpu(self.b))
@attr.gpu
def test_forward_gpu_nobias(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), None)
def check_backward(self, x_data, W_data, b_data, y_grad):
args = x_data, W_data
if b_data is not None:
args += b_data,
if self.use_batchwise_mask:
mask_shape = (x_data.shape[0],) + W_data.shape
else:
mask_shape = W_data.shape
xp = backend.get_array_module(x_data)
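        # Pre-sample a fixed mask so that every forward call made by the
        # numerical gradient check drops the same connections.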
mask = xp.random.rand(*mask_shape) >= self.ratio
def f(x, W, b=None):
return functions.simplified_dropconnect(
x, W, b, self.ratio, self.train, mask,
self.use_batchwise_mask)
gradient_check.check_backward(
f, args, y_grad, eps=1e-2, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
def test_backward_cpu_nobias(self):
self.check_backward(self.x, self.W, None, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_nobias(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
None, cuda.to_gpu(self.gy))
def check_double_backward(
self, x_data, W_data, b_data, y_grad,
x_grad_grad, W_grad_grad, b_grad_grad):
args = x_data, W_data
grads = x_grad_grad, W_grad_grad
if b_data is not None:
args += b_data,
grads += b_grad_grad,
if self.use_batchwise_mask:
mask_shape = (x_data.shape[0],) + W_data.shape
else:
mask_shape = W_data.shape
xp = backend.get_array_module(x_data)
mask = xp.random.rand(*mask_shape) >= self.ratio
def f(x, W, b=None):
return functions.simplified_dropconnect(
x, W, b, self.ratio, self.train, mask,
self.use_batchwise_mask)
gradient_check.check_double_backward(
f, args, y_grad, grads, eps=1e-2,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.W, self.b, self.gy, self.ggx, self.ggW, self.ggb)
def test_double_backward_cpu_nobias(self):
self.check_double_backward(
self.x, self.W, None, self.gy, self.ggx, self.ggW, None)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), cuda.to_gpu(self.b),
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
cuda.to_gpu(self.ggW), cuda.to_gpu(self.ggb))
@attr.gpu
def test_double_backward_gpu_nobias(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.W), None,
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx),
cuda.to_gpu(self.ggW), None)
testing.run_module(__name__, __file__)
| 6,462 | 35.721591 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/noise_tests/test_gumbel_softmax.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestGumbelSoftmax(unittest.TestCase):
def setUp(self):
self.log_pi = numpy.random.uniform(
-1, 1, self.shape).astype(numpy.float32)
self.tau = numpy.float32(numpy.random.uniform(0.1, 10.0))
def check_forward(self, log_pi_data, tau):
log_pi = chainer.Variable(log_pi_data)
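        # Gumbel-softmax draws y = softmax((log_pi + g) / tau), where g is
        # i.i.d. Gumbel(0, 1) noise.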
y = functions.gumbel_softmax(log_pi, tau=tau)
# Only checks dtype and shape because its result contains noise
self.assertEqual(y.dtype, numpy.float32)
self.assertEqual(y.shape, log_pi.shape)
self.assertEqual(
backend.get_array_module(y),
backend.get_array_module(log_pi))
def test_forward_cpu(self):
self.check_forward(self.log_pi, self.tau)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.log_pi), self.tau)
testing.run_module(__name__, __file__)
| 1,228 | 26.931818 | 71 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/noise_tests/__init__.py | (empty) | 0 | 0 | 0 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_repeat.py |
import unittest
import numpy
from chainer import functions
from chainer import testing
def _repeat(arr, repeats, axis=None):
# Workaround NumPy 1.9 issue.
if isinstance(repeats, tuple) and len(repeats) == 1:
repeats = repeats[0]
return numpy.repeat(arr, repeats, axis)
@testing.parameterize(*testing.product({
# repeats is any of (int, bool or tuple) and
# axis is any of (int or None).
'params': (
# Repeats 1-D array
testing.product({
'shape': [(2,)],
'repeats': [0, 1, 2, True, (0,), (1,), (2,), (True,)],
'axis': [None, 0],
}) +
# Repeats 2-D array (with axis=None)
testing.product({
'shape': [(3, 2)],
'repeats': [4, (4,), (4,) * 6, (True,) * 6],
'axis': [None],
}) +
# Repeats 2-D array (with axis=0)
testing.product({
'shape': [(3, 2)],
'repeats': [5, (5,), (5,) * 3],
'axis': [0],
}) +
# Repeats 2-D array (with axis=1)
testing.product({
'shape': [(3, 2)],
'repeats': [5, (5,), (5,) * 2],
'axis': [1],
}) +
# Repeats 3-D array (with axis=-2)
testing.product({
'shape': [(3, 2, 4)],
'repeats': [5, (5,), (5,) * 2],
'axis': [-2],
})
),
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestRepeat(testing.FunctionTestCase):
def setUp(self):
self.in_shape = self.params['shape']
self.repeats = self.params['repeats']
self.axis = self.params['axis']
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options.update({
'atol': 2 ** -4, 'rtol': 2 ** -4})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = _repeat(x, self.repeats, self.axis)
return y_expected,
def forward(self, inputs, devices):
x, = inputs
y = functions.repeat(x, self.repeats, self.axis)
return y,
@testing.parameterize(*testing.product({
'repeats': [-1, (-1, -1)],
'axis': [-1],
}))
class TestRepeatValueError(unittest.TestCase):
def test_value_error(self):
x = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(ValueError):
functions.repeat(x, self.repeats, self.axis)
class TestRepeatTypeError(unittest.TestCase):
def test_type_error_repeats_str(self):
x = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(TypeError):
functions.repeat(x, 'a')
def test_type_error_axis_str(self):
x = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(TypeError):
functions.repeat(x, 1, 'a')
def test_type_error_axis_bool(self):
x = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(TypeError):
functions.repeat(x, 1, True)
testing.run_module(__name__, __file__)
| 3,655 | 27.341085 | 75 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_pad.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 0, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant'},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant'},
{'shape': (2, 3, 2), 'pad_width': ((2, 5), (1, 2), (0, 7)),
'mode': 'constant'},
{'shape': (1, 3, 5, 2), 'pad_width': 2, 'mode': 'constant'}
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestPadDefault(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, self.mode)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, self.mode)
return y_expected.astype(self.dtype),
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3), 'pad_width': 1, 'mode': 'constant',
'constant_values': 1},
{'shape': (2, 3), 'pad_width': (1, 2), 'mode': 'constant',
'constant_values': (1, 2)},
{'shape': (2, 3), 'pad_width': ((1, 2), (3, 4)), 'mode': 'constant',
'constant_values': ((1, 2), (3, 4))},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
# Old NumPy versions do not support multi-dimensional constant_values
@testing.with_requires('numpy>=1.11.1')
class TestPad(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options.update({'atol': 3e-2, 'rtol': 3e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y_expected,
def forward(self, inputs, device):
x, = inputs
y = functions.pad(x, self.pad_width, mode=self.mode,
constant_values=self.constant_values)
return y,
testing.run_module(__name__, __file__)
| 3,560 | 27.717742 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_im2col.py |
import unittest
import numpy
from six import moves
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils.conv import get_conv_outsize
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
@testing.parameterize(*testing.product_dict(
[
{'params': (1, 1, 1, 1, 1, 1, 1, 1)},
{'params': (2, 2, 2, 2, 2, 2, 2, 2)},
{'params': (1, 2, 2, 1, 1, 2, 1, 1)},
{'params': (1, 2, 3, 4, 1, 2, 1, 1)},
{'params': (1, 2, 3, 4, 4, 5, 2, 3)},
{'params': (3, 3, 2, 2, 1, 1, 1, 1)}
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}
]
))
class TestIm2ColForward(unittest.TestCase):
in_shape = (2, 3, 8, 6)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(self.dtype)
def check_forward(self, x, kh, kw, sy, sx, ph, pw, dy, dx, gpu):
x = x.copy()
n, c, h, w = x.shape
col = functions.im2col(
x, (kh, kw), (sy, sx), (ph, pw), dilate=(dy, dx)).data
col_h = get_conv_outsize(h, kh, sy, ph, d=dy)
col_w = get_conv_outsize(w, kw, sx, pw, d=dx)
self.assertEqual(col.shape, (n, c * kh * kw, col_h, col_w))
col = col.reshape(n, c, kh, kw, col_h, col_w)
col = cuda.to_cpu(col)
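        # Each column entry col[:, :, ky, kx, y, x] must equal the input
        # pixel it was gathered from, at oy = y * sy - ph + ky * dy and
        # ox = x * sx - pw + kx * dx, or zero if (oy, ox) lies in the padding.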
for y in moves.range(col_h):
for x in moves.range(col_w):
for ky in moves.range(kh):
for kx in moves.range(kw):
oy = y * sy - ph + ky * dy
ox = x * sx - pw + kx * dx
if 0 <= oy < h and 0 <= ox < w:
testing.assert_allclose(
col[:, :, ky, kx, y, x],
self.x[:, :, oy, ox])
else:
testing.assert_allclose(
col[:, :, ky, kx, y, x],
numpy.zeros((2, 3), self.dtype))
def test_forward_cpu(self):
self.check_forward(self.x, *self.params, gpu=False)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), *self.params, gpu=True)
@testing.parameterize(*testing.product_dict(
[
{'ksize': 1, 'stride': 1, 'pad': 0, 'dilate': 1},
{'ksize': (1, 1), 'stride': (1, 1), 'pad': (1, 0), 'dilate': (1, 1)},
{'ksize': 2, 'stride': 2, 'pad': 2, 'dilate': 2},
{'ksize': (2, 3), 'stride': (1, 2), 'pad': 0, 'dilate': (2, 1)},
],
[
{'cover_all': False},
{'cover_all': True},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
class TestIm2Col(unittest.TestCase):
in_shape = (2, 3, 8, 6)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(self.dtype)
kh, kw = _pair(self.ksize)
sy, sx = _pair(self.stride)
ph, pw = _pair(self.pad)
dy, dx = _pair(self.dilate)
N, C, H, W = self.in_shape
o_H = get_conv_outsize(H, kh, sy, ph, cover_all=self.cover_all, d=dy)
o_W = get_conv_outsize(W, kw, sx, pw, cover_all=self.cover_all, d=dx)
self.gy = numpy.random.uniform(
size=(N, C * kh * kw, o_H, o_W)).astype(self.dtype)
self.ggx = numpy.random.uniform(
size=self.in_shape).astype(self.dtype)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
if self.dtype is numpy.float16:
self.check_backward_options.update({'atol': 2e-3, 'rtol': 1e-2})
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
if self.dtype is numpy.float16:
self.check_double_backward_options.update(
{'atol': 1e-3, 'rtol': 1e-2})
def check_backward(self, x, ksize, stride, pad, cover_all, dilate, gy):
def f(x):
return functions.im2col(
x, ksize, stride=stride, pad=pad, cover_all=cover_all,
dilate=dilate)
gradient_check.check_backward(
f, x, gy, dtype=numpy.float64, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(
self.x, self.ksize, self.stride, self.pad, self.cover_all,
self.dilate, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), self.ksize, self.stride, self.pad,
self.cover_all, self.dilate, cuda.to_gpu(self.gy))
def check_double_backward(self, x, ksize, stride, pad, cover_all, dilate,
gy, ggx):
def f(x):
return functions.im2col(
x, ksize, stride=stride, pad=pad, cover_all=cover_all,
dilate=dilate)
gradient_check.check_double_backward(
f, x, gy, ggx, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.ksize, self.stride, self.pad, self.cover_all,
self.dilate, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x),
self.ksize, self.stride, self.pad, self.cover_all, self.dilate,
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
| 5,631 | 31.554913 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_separate.py |
import numpy
from chainer import functions
from chainer import testing
from chainer.utils import force_array
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -1},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2,), 'axis': 0},
{'shape': (2,), 'axis': -1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestSeparate(testing.FunctionTestCase):
def setUp(self):
self.skip_double_backward_test = True
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.separate(x, self.axis)
def forward_expected(self, inputs):
x, = inputs
return tuple(
force_array(x.take(i, axis=self.axis))
for i in range(self.shape[self.axis])
)
testing.run_module(__name__, __file__)
| 1,543 | 22.753846 | 70 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_spatial_transformer_sampler.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer import Variable
def _identity_grid(in_shape):
mesh = numpy.meshgrid(
numpy.linspace(-1., 1., num=in_shape[2]),
numpy.linspace(-1., 1., num=in_shape[3]))
grid = numpy.concatenate([mesh[0][None], mesh[1][None]], axis=0)
grid = numpy.repeat(grid[None], in_shape[0], axis=0).astype(numpy.float32)
return grid
def _rotate_grid(in_shape):
mesh = numpy.meshgrid(
numpy.linspace(-1., 1., num=in_shape[2]),
numpy.linspace(-1., 1., num=in_shape[3]))
mesh = [numpy.rot90(mesh[0]), numpy.rot90(mesh[1])]
grid = numpy.concatenate([mesh[0][None], mesh[1][None]], axis=0)
grid = numpy.repeat(grid[None], in_shape[0], axis=0).astype(numpy.float32)
return grid
def _rotate_BCHW(x):
rotated_xs = []
for i in range(x.shape[0]):
x_i = x[i].transpose(1, 2, 0)
x_i = numpy.rot90(x_i)
rotated_xs.append(x_i.transpose(2, 0, 1))
rotated_xs = numpy.concatenate([r_x[None] for r_x in rotated_xs], axis=0)
return rotated_xs
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerSampler(unittest.TestCase):
in_shape = (2, 2, 4, 4)
out_shape = (2, 2, 3, 3)
grid_shape = (2, 2, 3, 3)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(self.dtype)
self.grid = numpy.random.uniform(
low=-2., high=2., size=self.grid_shape).astype(self.dtype)
self.grads = numpy.random.uniform(
size=self.out_shape).astype(self.dtype)
def check_forward(self, x, grid):
y = functions.spatial_transformer_sampler(x, grid)
self.assertEqual(y.shape, self.out_shape)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.grid)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.grid))
def check_backward(self, x, grid, grads):
gradient_check.check_backward(
functions.spatial_transformer_sampler,
(x, grid), (grads,), dtype='d', atol=1e-2, rtol=1e-2, eps=1e-5)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.grid, self.grads)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(cuda.to_gpu(self.x),
cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSpatialTransformerSamplerConsistencyWithCuDNN(unittest.TestCase):
in_shape = (2, 2, 4, 4)
out_shape = (2, 2, 3, 3)
grid_shape = (2, 2, 3, 3)
def setUp(self):
if self.dtype == numpy.float16:
            # Use fixed random values to avoid non-differentiable inputs
uniform = numpy.random.RandomState(0).uniform
else:
uniform = numpy.random.uniform
self.x = uniform(size=self.in_shape).astype(self.dtype)
self.grid = uniform(
low=-2, high=2, size=self.grid_shape).astype(self.dtype)
self.grads = uniform(size=self.out_shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.assert_options = {'atol': 1e-2}
else:
self.assert_options = {}
def _apply_backward(self, x, grid, grads):
x = Variable(x)
grid = Variable(grid)
y = functions.spatial_transformer_sampler(x, grid)
x.cleargrad()
grid.cleargrad()
y.grad = grads
y.backward()
return x, grid, y
@attr.gpu
@attr.cudnn
def test_consistency_with_cudnn_cpu(self):
with chainer.using_config('use_cudnn', 'never'):
x_cpu, grid_cpu, y_cpu = self._apply_backward(
self.x, self.grid, self.grads)
with chainer.using_config('use_cudnn', 'always'):
x_cudnn, grid_cudnn, y_cudnn = self._apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
testing.assert_allclose(
y_cpu.data, y_cudnn.data, **self.assert_options)
testing.assert_allclose(
x_cpu.grad, x_cudnn.grad, **self.assert_options)
testing.assert_allclose(
grid_cpu.grad, grid_cudnn.grad, **self.assert_options)
@attr.gpu
@attr.cudnn
def test_consistency_with_cudnn_gpu(self):
with chainer.using_config('use_cudnn', 'never'):
x_gpu, grid_gpu, y_gpu = self._apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
with chainer.using_config('use_cudnn', 'always'):
x_cudnn, grid_cudnn, y_cudnn = self._apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.grads))
testing.assert_allclose(
y_gpu.data, y_cudnn.data, **self.assert_options)
testing.assert_allclose(
x_gpu.grad, x_cudnn.grad, **self.assert_options)
testing.assert_allclose(
grid_gpu.grad, grid_cudnn.grad, **self.assert_options)
@testing.parameterize(
    {'grid_creator': _identity_grid, 'operator': lambda x: x,
     'use_cudnn': 'always'},
    {'grid_creator': _identity_grid, 'operator': lambda x: x,
'use_cudnn': 'never'},
{'grid_creator': _rotate_grid, 'operator': _rotate_BCHW,
'use_cudnn': 'always'},
{'grid_creator': _rotate_grid, 'operator': _rotate_BCHW,
'use_cudnn': 'never'},
)
class TestSpatialTransformerSamplerForwardToyCases(unittest.TestCase):
in_shape = (2, 2, 4, 4)
grid_shape = (2, 2, 3, 3)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
self.grid = self.grid_creator(self.in_shape)
def check_forward(self, x, grid):
y = functions.spatial_transformer_sampler(x, grid)
testing.assert_allclose(y.data, self.operator(self.x))
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.grid)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.grid))
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerSamplerForwardPaddedImage(unittest.TestCase):
in_shape = (1, 2, 4, 4)
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
p1 = [[-0.5], [-0.5]]
p2 = [[3.5], [3.5]]
p3 = [[2], [3.5]]
p4 = [[-0.5], [2]]
self.grid = numpy.concatenate((p1, p2, p3, p4), axis=1)
self.grid = self.grid.reshape(1, 2, 4, 1).astype(numpy.float32)
# Scale the coordinates so that the pixels inside the input image
        # lie in the range [-1, 1].
self.grid[:, 0] =\
((self.grid[:, 0] / (self.in_shape[3] - 1)) - 0.5) * 2
self.grid[:, 1] =\
((self.grid[:, 1] / (self.in_shape[2] - 1)) - 0.5) * 2
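        # With bilinear sampling and zero padding outside the image (assumed
        # here), a corner point such as (-0.5, -0.5) averages one real pixel
        # with three zero neighbours (hence / 4), while a point half a pixel
        # outside an edge keeps half of the nearest real pixel (hence / 2).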
exp_p1 = self.x[0, :, 0, 0] / 4
exp_p2 = self.x[0, :, 3, 3] / 4
exp_p3 = self.x[0, :, 3, 2] / 2
exp_p4 = self.x[0, :, 2, 0] / 2
self.expected = numpy.concatenate(
(exp_p1[:, None],
exp_p2[:, None],
exp_p3[:, None],
exp_p4[:, None]), axis=1)
self.expected = self.expected.reshape(1, 2, 4, 1).astype(numpy.float32)
def check_forward(self, x, grid, expected):
y = functions.spatial_transformer_sampler(x, grid)
testing.assert_allclose(y.data, expected)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.grid, self.expected)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.grid),
cuda.to_gpu(self.expected))
testing.run_module(__name__, __file__)
| 8,708 | 33.152941 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_rollaxis.py |
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(
{'axis': 0, 'start': 2, 'out_shape': (3, 2, 4)},
{'axis': 2, 'start': 0, 'out_shape': (4, 2, 3)},
{'axis': 1, 'start': 1, 'out_shape': (2, 3, 4)},
{'axis': -3, 'start': 2, 'out_shape': (3, 2, 4)},
{'axis': -1, 'start': 0, 'out_shape': (4, 2, 3)},
{'axis': -2, 'start': -2, 'out_shape': (2, 3, 4)},
{'axis': 0, 'start': 3, 'out_shape': (3, 4, 2)},
{'axis': 2, 'start': -3, 'out_shape': (4, 2, 3)},
{'axis': 0, 'start': 0, 'out_shape': (2, 3, 4)},
)
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestRollaxis(testing.FunctionTestCase):
dtype = numpy.float32
def setUp(self):
self.check_backward_options = {}
self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-2})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.rollaxis(x, self.axis, self.start)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expect = numpy.rollaxis(x, self.axis, self.start)
return y_expect,
@testing.parameterize(
{'axis': 3, 'start': 0},
{'axis': -4, 'start': 0},
{'axis': 0, 'start': 4},
{'axis': 0, 'start': -4},
)
class TestRollaxisInvalidType(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.rollaxis(x, self.axis, self.start)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestRollaxisInvalidTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def test_invalid_axis(self):
with self.assertRaises(TypeError):
functions.rollaxis(self.x, 'a', start=0)
def test_invalid_start(self):
with self.assertRaises(TypeError):
functions.rollaxis(self.x, 0, start='a')
testing.run_module(__name__, __file__)
| 2,753 | 26 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_swapaxes.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'in_shape': [(3, 4, 2)],
'axis1': [0],
'axis2': [1],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestSwapaxes(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(
0.5, 1, self.in_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = x.swapaxes(self.axis1, self.axis2)
return y_expected,
def forward(self, inputs, devices):
x, = inputs
y = functions.swapaxes(x, self.axis1, self.axis2)
return y,
testing.run_module(__name__, __file__)
| 1,126 | 21.54 | 60 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_pad_sequence.py |
import contextlib
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@contextlib.contextmanager
def disable_debug_mode_if(disable):
if disable:
with chainer.using_config('debug', False):
yield
else:
yield
@testing.parameterize(*testing.product({
'lengths': [[2, 1, 5, 3], [2], [0]],
'length': [None, 6, 'max'],
'shape': [(3, 4), ()],
'pad': [0, -1, float('inf'), float('nan')],
'dtype': [numpy.bool_, numpy.int8, numpy.int16, numpy.int32,
numpy.uint8, numpy.uint16, numpy.uint32,
numpy.float16, numpy.float32, numpy.float64],
}))
class TestPadSequence(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, (l,) + self.shape).astype(self.dtype)
for l in self.lengths]
self.ggxs = [
numpy.random.uniform(-1, 1, (l,) + self.shape).astype(self.dtype)
for l in self.lengths]
if self.length == 'max':
self.length = max(self.lengths)
if self.length:
max_length = self.length
else:
max_length = max(self.lengths)
self.y_shape = (len(self.lengths), max_length,) + self.shape
self.gy = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
if self.dtype == numpy.float16:
self.check_double_backward_options.update({'atol': 5e-3})
self.can_include_nan = numpy.isnan(self.pad)
def check_forward(self, xs):
        # Non-finite pad values do not work for integer dtypes.
if not numpy.isfinite(self.pad) and \
numpy.dtype(self.dtype).kind != 'f':
return
with disable_debug_mode_if(self.can_include_nan):
y = functions.pad_sequence(
xs, length=self.length, padding=self.pad)
self.assertEqual(y.shape, self.y_shape)
for i, (length, x) in enumerate(six.moves.zip(self.lengths, self.xs)):
testing.assert_allclose(y.data[i, 0:length], x)
testing.assert_allclose(
y.data[i, length:], self.dtype(self.pad))
def test_forward_cpu(self):
self.check_forward(self.xs)
@attr.gpu
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(x) for x in self.xs])
def check_backward(self, xs, gy):
        # Numerical gradients do not work with non-finite values.
# Gradients for integer values are not defined.
if not numpy.isfinite(self.pad) or numpy.dtype(self.dtype).kind != 'f':
return
def f(*xs):
return functions.pad_sequence(
xs, length=self.length, padding=self.pad)
gradient_check.check_backward(f, xs, gy, dtype=numpy.float64)
def test_backward_cpu(self):
self.check_backward(self.xs, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(x) for x in self.xs], cuda.to_gpu(self.gy))
def check_double_backward(self, xs, gy, ggxs):
if not numpy.isfinite(self.pad) or numpy.dtype(self.dtype).kind != 'f':
return
def f(*xs):
return functions.pad_sequence(
xs, length=self.length, padding=self.pad)
gradient_check.check_double_backward(
f, xs, gy, ggxs, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.xs, self.gy, self.ggxs)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
[cuda.to_gpu(x) for x in self.xs], cuda.to_gpu(self.gy),
[cuda.to_gpu(ggx) for ggx in self.ggxs])
testing.run_module(__name__, __file__)
| 3,985 | 30.888 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_spatial_transformer_grid.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerGrid(unittest.TestCase):
def setUp(self):
B = 3
self.theta = numpy.random.uniform(size=(B, 2, 3)).astype(self.dtype)
self.output_shape = (5, 6)
self.grads = numpy.random.uniform(
size=(B, 2) + self.output_shape).astype(self.dtype)
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-3, 'rtol': 1e-3}
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-1}
else:
self.check_forward_options = {}
self.check_backward_options = {}
def check_forward(self, theta, output_shape):
grid = functions.spatial_transformer_grid(theta, output_shape).data
theta = cuda.to_cpu(theta)
B = theta.shape[0]
H, W = output_shape
expected = []
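        # Each target position (j, i), with both coordinates normalized to
        # [-1, 1], is mapped through the affine matrix:
        #   grid[b, :, i, j] = theta[b] . (j, i, 1)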
for b in range(B):
for i in numpy.linspace(-1., 1., H):
for j in numpy.linspace(-1., 1., W):
coord = numpy.array([j, i, 1])
expected.append(self.theta[b].dot(coord))
expected = numpy.array(
expected).reshape(B, H, W, 2).transpose(0, 3, 1, 2)
testing.assert_allclose(grid, expected, **self.check_forward_options)
self.assertEqual(grid.dtype, self.dtype)
def test_forward_cpu(self):
self.check_forward(self.theta, self.output_shape)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.theta), self.output_shape)
def check_backward(self, theta, output_shape, grads):
def f(theta):
return functions.spatial_transformer_grid(theta, output_shape)
with chainer.using_config('use_cudnn', self.use_cudnn):
gradient_check.check_backward(
f, (theta,), (grads,), dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.theta, self.output_shape, self.grads)
@attr.gpu
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(cuda.to_gpu(self.theta), self.output_shape,
cuda.to_gpu(self.grads))
testing.run_module(__name__, __file__)
| 2,621 | 32.615385 | 77 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_concat.py |
import unittest
import numpy
from chainer import functions
from chainer import testing
from chainer.testing import backend
@backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
])
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 7, 3), 'axis': 1,
'slices': [(slice(None), slice(None, 2)), (slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (7, 3), 'axis': 0,
'slices': [slice(None, 2), slice(2, 5), slice(5, None)]},
{'shape': (2,), 'axis': 0, 'slices': [slice(None, 1), slice(1, None)]},
{'shape': (2,), 'axis': 0, 'slices': [()]},
{'shape': (2, 7, 3), 'axis': 1,
'slices': [(slice(None), slice(None, 2)), (slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (2, 7, 3), 'axis': 1,
'slices': [(slice(None), slice(None, 2)), (slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (2, 7, 3), 'axis': -2,
'slices': [(slice(None), slice(None, 2)), (slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (7, 3, 2, 2), 'axis': 0,
'slices': [slice(None, 2), slice(2, 5), slice(5, None)]},
{'shape': (2, 7, 3, 5), 'axis': 1,
'slices': [(slice(None), slice(None, 2), slice(None)),
(slice(None), slice(2, 5), slice(None)),
(slice(None), slice(5, None), slice(None))]},
{'shape': (2, 7, 3, 5), 'axis': -1,
'slices': [(slice(None), slice(None), slice(None), slice(None, 2)),
(slice(None), slice(None), slice(None), slice(2, 3)),
(slice(None), slice(None), slice(None), slice(3, None))]},
{'shape': (2, 7, 3, 5), 'axis': -3,
'slices': [(slice(None), slice(None, 2), slice(None)),
(slice(None), slice(2, 5), slice(None)),
(slice(None), slice(5, None), slice(None))]},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestConcat(testing.FunctionTestCase):
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
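        # Build the inputs by slicing one random array along the
        # concatenation axis.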
y = numpy.random.uniform(-1, 1, shape).astype(dtype)
xs = tuple([y[s] for s in self.slices])
return xs
def forward(self, inputs, device):
y = functions.concat(inputs, self.axis)
return y,
def forward_expected(self, inputs):
y = numpy.concatenate(inputs, self.axis)
return y,
class TestConcatInvalidAxisType(unittest.TestCase):
    def test_invalid_axis_type(self):
inputs = [numpy.random.rand(3, 4), numpy.random.rand(3, 1)]
with self.assertRaises(TypeError):
functions.concat(inputs, 'a')
testing.run_module(__name__, __file__)
| 3,200 | 34.175824 | 79 | py |
| chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_select_item.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[
{'in_shape': (10, 5), 'out_shape': (10,)},
{'in_shape': (0, 5), 'out_shape': (0,)},
{'in_shape': (1, 33), 'out_shape': (1,)},
{'in_shape': (10, 5), 'out_shape': (10,)},
{'in_shape': (10, 5), 'out_shape': (10,)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestSelectItem(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, self.in_shape).astype(self.dtype)
self.t_data = numpy.random.randint(
0, 2, self.out_shape).astype(numpy.int32)
self.gy_data = numpy.random.uniform(
-1, 1, self.out_shape).astype(self.dtype)
self.ggx_data = numpy.random.uniform(
-1, 1, self.in_shape).astype(self.dtype)
self.check_backward_options = {'atol': 0.01, 'rtol': 0.01}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 0.1, 'rtol': 0.1}
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
y = functions.select_item(x, t)
y_exp = cuda.to_cpu(x_data)[range(t_data.size), cuda.to_cpu(t_data)]
self.assertEqual(y.data.dtype, self.dtype)
numpy.testing.assert_equal(cuda.to_cpu(y.data), y_exp)
def test_forward_cpu(self):
self.check_forward(self.x_data, self.t_data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.t_data))
def check_backward(self, x_data, t_data, gy_data):
gradient_check.check_backward(
functions.select_item,
(x_data, t_data), gy_data, eps=0.01, dtype='d',
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x_data, self.t_data, self.gy_data)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.t_data),
cuda.to_gpu(self.gy_data))
def check_double_backward(self, x_data, t_data, gy_data, ggx_data):
def f(x):
return functions.select_item(x, t_data)
gradient_check.check_double_backward(
f, x_data, gy_data, ggx_data, eps=0.01, dtype='d',
**self.check_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x_data, self.t_data,
self.gy_data, self.ggx_data)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.t_data),
cuda.to_gpu(self.gy_data),
cuda.to_gpu(self.ggx_data))
@testing.parameterize(
{'t_value': -1, 'valid': False},
{'t_value': 3, 'valid': False},
{'t_value': 0, 'valid': True},
)
class TestSelectItemValueCheck(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 2)).astype(numpy.float32)
self.t = numpy.array([self.t_value], dtype=numpy.int32)
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_value_check(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
if self.valid:
            # A valid index must not raise
functions.select_item(x, t)
else:
with self.assertRaises(ValueError):
functions.select_item(x, t)
def test_value_check_cpu(self):
self.check_value_check(self.x, self.t)
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check(self.x, self.t)
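# Illustrative usage sketch, not part of the original test module: select_item
# picks x[i, t[i]] for each row i, which is exactly what check_forward above
# verifies against plain NumPy indexing. The helper name and values are
# hypothetical.
def _example_select_item_usage():
    x = numpy.arange(6, dtype=numpy.float32).reshape(3, 2)
    t = numpy.array([0, 1, 0], dtype=numpy.int32)
    y = functions.select_item(x, t)
    assert (y.array == [0., 3., 4.]).all()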
testing.run_module(__name__, __file__)
| 4,229 | 31.538462 | 76 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_split_axis.py |
import unittest
import numpy
import six
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import backend
def inject_backend_tests():
decorator = backend.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
return decorator
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 7, 3), 'axis': 1, 'ys_section': [2, 5],
'slices': [
(slice(None), slice(None, 2)),
(slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (7, 3), 'axis': 0, 'ys_section': [2, 5],
'slices': [slice(None, 2), slice(2, 5), slice(5, None)]},
{'shape': (7, 0), 'axis': 0, 'ys_section': [2, 5],
'slices': [slice(None, 2), slice(2, 5), slice(5, None)]},
{'shape': (2, 9, 3), 'axis': 1, 'ys_section': 3,
'slices': [
(slice(None), slice(None, 3)),
(slice(None), slice(3, 6)),
(slice(None), slice(6, None))]},
{'shape': (2, 6, 3), 'axis': 1, 'ys_section': 3,
'slices': [
(slice(None), slice(None, 2)),
(slice(None), slice(2, 4)),
(slice(None), slice(4, None))]},
{'shape': (2,), 'axis': 0, 'ys_section': [1],
'slices': [slice(None, 1), slice(1, None)]},
{'shape': (2,), 'axis': 0, 'ys_section': [],
'slices': [slice(None, None)]},
{'shape': (2, 7, 3), 'axis': 1, 'ys_section': [2, 5],
'slices': [
(slice(None), slice(None, 2)),
(slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (2, 7, 3), 'axis': 1, 'ys_section': [0],
'slices': [
(slice(None), slice(None, 0)),
(slice(None), slice(0, 7))]
},
{'shape': (2, 7, 3), 'axis': 1, 'ys_section': [7],
'slices': [
(slice(None), slice(None, 7)),
(slice(None), slice(7, 7))]
},
{'shape': (2, 7, 3, 2), 'axis': 1, 'ys_section': [2, 5],
'slices': [
(slice(None), slice(None, 2)),
(slice(None), slice(2, 5)),
(slice(None), slice(5, None))]},
{'shape': (2, 7, 3, 2), 'axis': 1, 'ys_section': [0],
'slices': [
(slice(None), slice(None, 0)),
(slice(None), slice(0, 7))]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': 1,
'slices': [slice(None, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': 2,
'slices': [slice(None, 5), slice(5, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': [],
'slices': [slice(None, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': [0, 5],
'slices': [slice(0, 0), slice(0, 5), slice(5, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': [0, 0, 5],
'slices': [slice(0, 0), slice(0, 0), slice(None, 5), slice(5, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': [2, 3, 5],
'slices': [slice(None, 2), slice(2, 3), slice(3, 5), slice(5, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0,
'ys_section': numpy.asarray([2, 3, 5]),
'slices': [slice(None, 2), slice(2, 3), slice(3, 5), slice(5, None)]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': [2, 3, 3, 5],
'slices': [slice(None, 2), slice(2, 3), slice(3, 3), slice(3, 5),
slice(5, None)]
},
{'shape': (5, 5, 3, 8), 'axis': 3, 'ys_section': 2,
'slices': [
(slice(None, None), slice(None, None), slice(None, None),
slice(None, 4)),
(slice(None, None), slice(None, None), slice(None, None),
slice(4, None))]
},
{'shape': (5, 8, 3, 2), 'axis': -3, 'ys_section': 2,
'slices': [(slice(None, None), slice(None, 4)),
(slice(None, None), slice(4, None))]
},
{'shape': (5, 8, 3, 2), 'axis': 1, 'ys_section': 2,
'slices': [(slice(None, None), slice(None, 4)),
(slice(None, None), slice(4, None))]
},
{'shape': (5, 4, 3, 4), 'axis': -1, 'ys_section': 2,
'slices': [
(slice(None, None), slice(None, None), slice(None, None),
slice(None, 2)),
(slice(None, None), slice(None, None), slice(None, None),
slice(2, None))]
},
{'shape': (10, 4, 3, 2), 'axis': 0, 'ys_section': numpy.array([]),
'slices': [slice(None, None)]
},
# Functions with multiple outputs may receive `None` upstream gradients
# in their backward method, `split_axis` must handle this case
# (by constructing 0-filled variables for `None` gradients).
{'shape': (2, 7, 3), 'axis': 1, 'ys_section': [2, 5],
'slices': [
(slice(None), slice(None, 2)),
(slice(None), slice(2, 5)),
(slice(None), slice(5, None))],
'grad_outputs_is_none': [False, True, False]
},
{'shape': (2, 7, 3, 1), 'axis': 1, 'ys_section': [2, 5],
'slices': [
(slice(None), slice(None, 2)),
(slice(None), slice(2, 5)),
(slice(None), slice(5, None))],
'grad_outputs_is_none': [False, True, False]
},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
@inject_backend_tests()
class TestSplitAxis(testing.FunctionTestCase):
# A list of booleans. If element i is `True`, the i-th upstream gradient is
# generated as `None`. Default is `None`, in which case all gradients are
# ndarrays.
grad_outputs_is_none = None
def generate_inputs(self):
shape = self.shape
dtype = self.dtype
x = numpy.arange(numpy.prod(shape), dtype=dtype).reshape(shape)
return x,
def generate_grad_outputs(self, outputs_template):
grad_outputs = tuple([
numpy.random.uniform(-1, 1, a.shape).astype(a.dtype)
for a in outputs_template])
if self.grad_outputs_is_none is not None:
assert len(self.grad_outputs_is_none) == len(grad_outputs)
            grad_outputs = tuple(
                None if is_none else g for is_none, g
                in six.moves.zip(self.grad_outputs_is_none, grad_outputs))
return grad_outputs
def forward(self, inputs, device):
x, = inputs
return functions.split_axis(
x, self.ys_section, self.axis, force_tuple=True)
def forward_expected(self, inputs):
x, = inputs
return tuple([x[s] for s in self.slices])
@inject_backend_tests()
class TestSplitAxisNone(testing.FunctionTestCase):
skip_double_backward_test = True
axis = 0
ys_section = [1]
def generate_inputs(self):
x = numpy.array([1, 2], dtype=numpy.float32)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.split_axis(
x, self.ys_section, self.axis)
def forward_expected(self, inputs):
x, = inputs
return tuple(numpy.split(x, self.ys_section, self.axis))
@testing.parameterize(
{'force_tuple': True},
{'force_tuple': False},
)
@inject_backend_tests()
class TestSplitAxisForceArray(testing.FunctionTestCase):
skip_backward_test = True
skip_double_backward_test = True
axis = 1
def generate_inputs(self):
x = numpy.arange(42, dtype=numpy.float32).reshape(2, 7, 3)
return x,
def forward(self, inputs, device):
x, = inputs
ret = functions.split_axis(
x, 1, self.axis, force_tuple=self.force_tuple)
if self.force_tuple:
assert isinstance(ret, tuple)
assert len(ret) == 1
return ret
else:
assert isinstance(ret, chainer.Variable)
return ret,
def forward_expected(self, inputs):
x, = inputs
return tuple(numpy.split(x, 1, self.axis))
class TestSplitAxisInvalidSections(unittest.TestCase):
def setUp(self):
self.default_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.default_debug)
def test_invalid_sections(self):
x = numpy.zeros((2, 3, 4), dtype='f')
with self.assertRaises(ValueError):
functions.split_axis(x, [2, 1], 1)
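# Illustrative usage sketch, not part of the original test module: split_axis
# with an explicit section list, plus the case noted in the parameterization
# above where only one output is used, so the remaining outputs receive `None`
# upstream gradients and split_axis zero-fills them. The helper name is
# hypothetical.
def _example_split_axis_usage():
    x = chainer.Variable(numpy.arange(14, dtype=numpy.float32).reshape(2, 7))
    y0, y1, y2 = functions.split_axis(x, [2, 5], axis=1, force_tuple=True)
    assert y0.shape == (2, 2) and y1.shape == (2, 3) and y2.shape == (2, 2)
    y1.grad = numpy.ones(y1.shape, dtype=numpy.float32)
    y1.backward()  # y0 and y2 get no gradient; backward still succeeds
    assert x.grad.shape == (2, 7)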
testing.run_module(__name__, __file__)
| 9,026 | 33.193182 | 79 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_get_item.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
_backend_params = (
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}]
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
@testing.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[{'axes': [1, 2], 'offsets': 0},
{'axes': [1, 2], 'offsets': [0, 1, 1]},
{'axes': 1, 'offsets': 1},
{'axes': 1, 'offsets': [0, 1, 1]},
{'axes': [], 'offsets': 0, 'new_axes': 0},
{'axes': [], 'offsets': 0, 'new_axes': 2},
{'axes': [], 'offsets': 0, 'new_axes': 3},
{'slices': (1, -1, 0)},
{'slices': (1, -1)},
{'slices': (1, Ellipsis, -1)},
{'slices': (1, None, Ellipsis, None, -1)},
]
))
class TestGetItem(testing.FunctionTestCase):
def setUp(self):
shape = (4, 2, 1)
if not hasattr(self, 'slices'):
axes = self.axes
offsets = self.offsets
# Convert axes, offsets and shape to slices
if isinstance(offsets, int):
offsets = tuple([offsets] * len(shape))
if isinstance(axes, int):
axes = tuple([axes])
slices = [slice(None)] * len(shape)
for axis in axes:
slices[axis] = slice(
offsets[axis], offsets[axis] + shape[axis])
if hasattr(self, 'new_axes'):
slices.insert(self.new_axes, None)
self.axes = axes
self.offsets = offsets
self.slices = tuple(slices)
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.get_item(x, self.slices)
return y,
def forward_expected(self, inputs):
x, = inputs
y = x[self.slices]
return numpy.asarray(y),
@testing.inject_backend_tests(None, _backend_params)
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[{'slices': []},
{'slices': ([],)},
{'slices': ([[]],)},
     {'slices': numpy.array([], dtype=bool)},
{'slices': (1, [1])},
{'slices': ([1], slice(1, 2))},
{'slices': [1, 0]},
{'slices': ([1, 0],)},
{'slices': numpy.array([[1, 0], [2, 3]])},
{'slices': ([1, 0], [1, 1])},
{'slices': ([1, 0], slice(None), [[1, 1], [1, 1]])},
{'slices': ([1, 0], slice(1, 2), [0, 0])},
{'slices': ([[1, 1], [1, 0]], slice(1, 2), 1)},
{'slices': numpy.array([True] * 18 + [False] * 6).reshape(4, 3, 2)},
{'slices': numpy.array([True, False, False, True])},
{'slices': (slice(None), numpy.array([True, False, True]))},
{'slices': numpy.array([False, False, False, False])},
{'slices': (3, 2, Ellipsis, 1)},
{'slices': (numpy.array(False)), 'input_shape': ()},
{'slices': (numpy.array(True)), 'input_shape': ()},
]
))
class TestGetItemAdvanced(testing.FunctionTestCase):
input_shape = (4, 3, 2)
def setUp(self):
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.input_shape).astype(self.dtype)
return x,
def _convert_slices(self, slices, device):
# Converts advanced indexing slices (of numpy.ndarray) to respective
# backend arrays.
if isinstance(slices, list):
return [self._convert_slices(a, device) for a in slices]
if isinstance(slices, tuple):
return tuple([self._convert_slices(a, device) for a in slices])
if isinstance(slices, numpy.ndarray):
return device.send(slices)
return slices
def forward(self, inputs, device):
x, = inputs
slices = self._convert_slices(self.slices, device)
y = functions.get_item(x, slices)
return y,
def forward_expected(self, inputs):
x, = inputs
y = x[self.slices]
return numpy.asarray(y),
@testing.parameterize(
{'slices': ([1, 0], [1, 1]), 'sliced_shape': (2, 2)},
{'slices': ([1, 0], slice(None), [[1, 1], [1, 1]]),
'sliced_shape': (2, 2, 3)},
{'slices': ([1, 0], [1, 1], [0, 0]), 'sliced_shape': (2,)},
{'slices': (slice(None), numpy.array([True, False, True])),
'sliced_shape': (4, 2, 2)},
)
class TestCupyIndicesGetItem(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, (4, 3, 2)).astype(numpy.float32)
self.gy_data = numpy.random.uniform(
-1, 1, self.sliced_shape).astype(numpy.float32)
def check_forward(self, x_data):
slices = []
for i, s in enumerate(self.slices):
if isinstance(s, numpy.ndarray):
s = chainer.backends.cuda.cupy.array(s)
if isinstance(s, list):
s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
slices.append(s)
slices = tuple(slices)
x = chainer.Variable(x_data)
y = functions.get_item(x, slices)
self.assertEqual(y.data.dtype, numpy.float32)
numpy.testing.assert_equal(cuda.to_cpu(x_data)[self.slices],
cuda.to_cpu(y.data))
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x_data))
def check_backward(self, x_data, y_grad):
slices = []
for i, s in enumerate(self.slices):
if isinstance(s, numpy.ndarray):
s = chainer.backends.cuda.cupy.array(s)
if isinstance(s, list):
s = chainer.backends.cuda.cupy.array(s, dtype=numpy.int32)
slices.append(s)
slices = tuple(slices)
def f(x):
return functions.get_item(x, slices)
gradient_check.check_backward(
f, (x_data,), y_grad, dtype='d')
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.gy_data))
class TestInvalidGetItem(unittest.TestCase):
def setUp(self):
self.default_debug = chainer.is_debug()
chainer.set_debug(True)
self.x_data = numpy.random.uniform(-1, 1, (4, 3, 2))
def tearDown(self):
chainer.set_debug(self.default_debug)
def test_multiple_ellipsis(self):
with self.assertRaises(ValueError):
functions.get_item(self.x_data, (Ellipsis, Ellipsis))
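# Illustrative usage sketch, not part of the original test module: get_item
# applies an indexing tuple like the ones parameterized above and is
# differentiable with respect to x. The helper name and values are
# hypothetical.
def _example_get_item_usage():
    x = chainer.Variable(
        numpy.arange(24, dtype=numpy.float32).reshape(4, 3, 2))
    y = functions.get_item(x, (slice(None), 1, slice(None)))  # like x[:, 1, :]
    assert y.shape == (4, 2)
    y.grad = numpy.ones(y.shape, dtype=numpy.float32)
    y.backward()  # gradient is scattered back into the full (4, 3, 2) shape
    assert x.grad.shape == (4, 3, 2)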
testing.run_module(__name__, __file__)
| 7,353 | 30.973913 | 79 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_expand_dims.py |
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': 0},
{'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': 1},
{'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': 2},
{'in_shape': (3, 2), 'out_shape': (3, 2, 1), 'axis': -1},
{'in_shape': (3, 2), 'out_shape': (3, 1, 2), 'axis': -2},
{'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': -3},
{'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': 0},
{'in_shape': (3, 2), 'out_shape': (1, 3, 2), 'axis': 0},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestExpandDims(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expect = numpy.expand_dims(cuda.to_cpu(x), self.axis)
return y_expect,
def forward(self, inputs, device):
x, = inputs
y = functions.expand_dims(x, self.axis)
return y,
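# Illustrative usage sketch, not part of the original test module: expand_dims
# inserts a length-1 axis at the given position, matching numpy.expand_dims as
# forward_expected above checks. The helper name is hypothetical.
def _example_expand_dims_usage():
    x = numpy.zeros((3, 2), dtype=numpy.float32)
    assert functions.expand_dims(x, 0).shape == (1, 3, 2)
    assert functions.expand_dims(x, -1).shape == (3, 2, 1)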
testing.run_module(__name__, __file__)
| 1,672 | 26.42623 | 73 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_flipud.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3,), (3, 4)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFlipUD(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y = numpy.flipud(x)
return y,
def forward(self, inputs, device):
x, = inputs
return functions.flipud(x),
testing.run_module(__name__, __file__)
| 992 | 20.586957 | 69 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_broadcast.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
import chainerx
@testing.parameterize(*testing.product_dict(
[
{'in_shapes': [(3, 1, 4), (1, 2, 4)], 'out_shape': (3, 2, 4)},
{'in_shapes': [(3, 2, 4), (4,)], 'out_shape': (3, 2, 4)},
{'in_shapes': [(3, 2, 4), ()], 'out_shape': (3, 2, 4)},
{'in_shapes': [(3, 2, 4), (3, 2, 4)], 'out_shape': (3, 2, 4)},
{'in_shapes': [(), ()], 'out_shape': ()},
{'in_shapes': [(1, 1, 1), (1,)], 'out_shape': (1, 1, 1)},
{'in_shapes': [(1, 1, 1), ()], 'out_shape': (1, 1, 1)},
{'in_shapes': [(3, 2, 4)], 'out_shape': (3, 2, 4)},
{'in_shapes': [(3, 1, 4), (1, 2, 4), (3, 2, 1)],
'out_shape': (3, 2, 4)},
{'in_shapes': [(1, 0, 1), (2,)], 'out_shape': (1, 0, 2)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestBroadcast(unittest.TestCase):
def setUp(self):
uniform = numpy.random.uniform
self.data = [uniform(0, 1, shape).astype(self.dtype)
for shape in self.in_shapes]
self.grads = [uniform(0, 1, self.out_shape).astype(self.dtype)
for _ in range(len(self.in_shapes))]
self.gg = [uniform(0, 1, shape).astype(self.dtype)
for shape in self.in_shapes]
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 1e-2, 'rtol': 1e-1}
self.check_double_backward_options = {'atol': 1e-2, 'rtol': 1e-1}
def check_forward(self, data):
xs = [chainer.Variable(x) for x in data]
bxs = functions.broadcast(*xs)
# When len(xs) == 1, function returns a Variable object
if isinstance(bxs, chainer.Variable):
bxs = (bxs,)
for bx in bxs:
self.assertEqual(bx.data.shape, self.out_shape)
self.assertEqual(bx.data.dtype, self.dtype)
def test_forward_cpu(self):
self.check_forward(self.data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(x) for x in self.data])
def check_backward(self, data, grads):
def f(*xs):
return functions.broadcast(*xs)
gradient_check.check_backward(
f, data, grads, dtype=numpy.float64, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.data, self.grads)
@attr.gpu
def test_backward_gpu(self):
self.check_backward([cuda.to_gpu(x) for x in self.data],
[cuda.to_gpu(x) for x in self.grads])
def check_double_backward(self, data, grads, gg):
if len(data) == 1:
return
gradient_check.check_double_backward(
functions.broadcast, data, grads, gg, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.data, self.grads, self.gg)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward([cuda.to_gpu(x) for x in self.data],
[cuda.to_gpu(x) for x in self.grads],
[cuda.to_gpu(x) for x in self.gg])
class TestBroadcastTypeError(unittest.TestCase):
def test_invalid_shape(self):
x_data = numpy.zeros((3, 2, 5), dtype=numpy.int32)
y_data = numpy.zeros((1, 3, 4), dtype=numpy.float32)
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
with self.assertRaises(type_check.InvalidType):
functions.broadcast(x, y)
def test_invalid_shape_fill(self):
x_data = numpy.zeros((3, 2, 5), dtype=numpy.int32)
y_data = numpy.zeros(4, dtype=numpy.float32)
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
with self.assertRaises(type_check.InvalidType):
functions.broadcast(x, y)
def test_no_args(self):
with self.assertRaises(type_check.InvalidType):
functions.broadcast()
@testing.parameterize(*testing.product_dict(
[
{'in_shape': (3, 1, 4), 'out_shape': (3, 2, 4)},
{'in_shape': (4,), 'out_shape': (3, 2, 4)},
{'in_shape': (3, 2, 4), 'out_shape': (3, 2, 4)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestBroadcastTo(unittest.TestCase):
def setUp(self):
uniform = numpy.random.uniform
self.data = uniform(0, 1, self.in_shape).astype(self.dtype)
self.grad = uniform(0, 1, self.out_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {
'eps': 2 ** -5, 'atol': 1e-2, 'rtol': 1e-1}
def check_forward(self, data):
x = chainer.Variable(data)
bx = functions.broadcast_to(x, self.out_shape)
self.assertEqual(bx.data.shape, self.out_shape)
def test_forward_cpu(self):
self.check_forward(self.data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.data))
@attr.chainerx
def test_forward_chainerx(self):
self.check_forward(chainerx.array(self.data))
def check_backward(self, data, grads):
gradient_check.check_backward(
lambda x: functions.broadcast_to(x, self.out_shape), data, grads,
dtype=numpy.float64, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.data, self.grad)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.data), cuda.to_gpu(self.grad))
@attr.chainerx
def test_backward_chainerx(self):
self.check_backward(
chainerx.array(self.data), chainerx.array(self.grad))
@testing.parameterize(
{'in_shape': (3, 2, 4), 'out_shape': (4,)},
{'in_shape': (3, 2, 4), 'out_shape': (3, 1, 4)},
{'in_shape': (3, 2, 4), 'out_shape': (1, 3, 2, 3)},
)
class TestBroadcastToTypeCheck(unittest.TestCase):
def setUp(self):
uniform = numpy.random.uniform
self.data = uniform(0, 1, self.in_shape).astype(numpy.float32)
def test_type_check(self):
x = chainer.Variable(self.data)
with self.assertRaises(type_check.InvalidType):
functions.broadcast_to(x, self.out_shape)
class TestBroadcastToSkip(unittest.TestCase):
shape = (2, 3)
def setUp(self):
self.data = numpy.random.uniform(0, 1, self.shape)
def test_ndarray(self):
ret = functions.broadcast_to(self.data, self.shape)
self.assertIs(self.data, ret.data)
def test_variable(self):
x = chainer.Variable(self.data)
ret = functions.broadcast_to(x, self.shape)
self.assertIs(x, ret)
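# Illustrative usage sketch, not part of the original test module:
# broadcast_to follows NumPy broadcasting rules, and its backward pass sums
# the gradient over the broadcast axes, which is what check_backward above
# verifies numerically. The helper name and values are hypothetical.
def _example_broadcast_to_usage():
    x = chainer.Variable(numpy.ones((3, 1, 4), dtype=numpy.float32))
    y = functions.broadcast_to(x, (3, 2, 4))
    assert y.shape == (3, 2, 4)
    y.grad = numpy.ones(y.shape, dtype=numpy.float32)
    y.backward()
    assert (x.grad == 2).all()  # gradient summed over the broadcast axis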
testing.run_module(__name__, __file__)
| 7,210 | 31.628959 | 79 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_space_2_depth.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestSpace2Depth(unittest.TestCase):
def setUp(self):
self.depth = numpy.arange(96).reshape(2, 8, 3, 2).astype(self.dtype)
self.space = numpy.array([[[[0., 12., 1., 13.],
[24., 36., 25., 37.],
[2., 14., 3., 15.],
[26., 38., 27., 39.],
[4., 16., 5., 17.],
[28., 40., 29., 41.]],
[[6., 18., 7., 19.],
[30., 42., 31., 43.],
[8., 20., 9., 21.],
[32., 44., 33., 45.],
[10., 22., 11., 23.],
[34., 46., 35., 47.]]],
[[[48., 60., 49., 61.],
[72., 84., 73., 85.],
[50., 62., 51., 63.],
[74., 86., 75., 87.],
[52., 64., 53., 65.],
[76., 88., 77., 89.]],
[[54., 66., 55., 67.],
[78., 90., 79., 91.],
[56., 68., 57., 69.],
[80., 92., 81., 93.],
[58., 70., 59., 71.],
[82., 94., 83., 95.]]]]
).astype(self.dtype)
self.x = numpy.random.randn(2, 2, 6, 4).astype(self.dtype)
self.gy = numpy.random.randn(2, 8, 3, 2).astype(self.dtype)
self.ggx = numpy.random.randn(2, 2, 6, 4).astype(self.dtype)
self.r = 2
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, space_data, depth_data):
space = chainer.Variable(space_data)
s2d = functions.space2depth(space, self.r)
s2d_value = cuda.to_cpu(s2d.data)
self.assertEqual(s2d_value.dtype, self.dtype)
self.assertEqual(s2d_value.shape, (2, 8, 3, 2))
s2d_expect = depth_data
testing.assert_allclose(s2d_value, s2d_expect)
def test_forward_cpu(self):
self.check_forward(self.space, self.depth)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.space), cuda.to_gpu(self.depth))
def check_backward(self, x_data, y_grad):
def f(x):
return functions.space2depth(x, self.r)
gradient_check.check_backward(f, x_data, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
def f(x):
return functions.space2depth(x, self.r)
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
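# Illustrative usage sketch, not part of the original test module: with r=2,
# space2depth folds each 2x2 spatial block into the channel axis, so an
# (N, C, H, W) input becomes (N, C*r*r, H//r, W//r), matching the shapes used
# in TestSpace2Depth above. The helper name is hypothetical.
def _example_space2depth_usage():
    x = numpy.zeros((2, 2, 6, 4), dtype=numpy.float32)
    y = functions.space2depth(x, 2)
    assert y.shape == (2, 8, 3, 2)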
testing.run_module(__name__, __file__)
| 4,171 | 38.358491 | 77 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_fliplr.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 4), (3, 4, 2)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFlipLR(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(0, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
return numpy.fliplr(x),
def forward(self, inputs, device):
x, = inputs
return functions.fliplr(x),
testing.run_module(__name__, __file__)
| 983 | 20.866667 | 69 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_depth_2_space.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestDepth2Space(unittest.TestCase):
def setUp(self):
self.depth = numpy.arange(96).reshape(2, 8, 3, 2).astype(self.dtype)
self.space = numpy.array([[[[0., 12., 1., 13.],
[24., 36., 25., 37.],
[2., 14., 3., 15.],
[26., 38., 27., 39.],
[4., 16., 5., 17.],
[28., 40., 29., 41.]],
[[6., 18., 7., 19.],
[30., 42., 31., 43.],
[8., 20., 9., 21.],
[32., 44., 33., 45.],
[10., 22., 11., 23.],
[34., 46., 35., 47.]]],
[[[48., 60., 49., 61.],
[72., 84., 73., 85.],
[50., 62., 51., 63.],
[74., 86., 75., 87.],
[52., 64., 53., 65.],
[76., 88., 77., 89.]],
[[54., 66., 55., 67.],
[78., 90., 79., 91.],
[56., 68., 57., 69.],
[80., 92., 81., 93.],
[58., 70., 59., 71.],
[82., 94., 83., 95.]]]]
).astype(self.dtype)
self.x = numpy.random.randn(2, 8, 3, 2).astype(self.dtype)
self.gy = numpy.random.randn(2, 2, 6, 4).astype(self.dtype)
self.ggx = numpy.random.randn(2, 8, 3, 2).astype(self.dtype)
self.r = 2
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, depth_data, space_data):
depth = chainer.Variable(depth_data)
d2s = functions.depth2space(depth, self.r)
d2s_value = cuda.to_cpu(d2s.data)
self.assertEqual(d2s_value.dtype, self.dtype)
self.assertEqual(d2s_value.shape, (2, 2, 6, 4))
d2s_expect = space_data
testing.assert_allclose(d2s_value, d2s_expect)
def test_forward_cpu(self):
self.check_forward(self.depth, self.space)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.depth), cuda.to_gpu(self.space))
def check_backward(self, x_data, y_grad):
def f(x):
return functions.depth2space(x, self.r)
gradient_check.check_backward(f, x_data, y_grad, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
def f(x):
return functions.depth2space(x, self.r)
gradient_check.check_double_backward(
f, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
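# Illustrative usage sketch, not part of the original test module: depth2space
# is the inverse of space2depth, expanding channels back into 2x2 spatial
# blocks for r=2, matching the shapes used in TestDepth2Space above. The
# helper name is hypothetical.
def _example_depth2space_usage():
    x = numpy.zeros((2, 8, 3, 2), dtype=numpy.float32)
    y = functions.depth2space(x, 2)
    assert y.shape == (2, 2, 6, 4)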
testing.run_module(__name__, __file__)
| 4,171 | 38.358491 | 77 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_transpose_sequence.py |
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(
{'shape': (), 'lengths': [4, 2, 1], 'trans_lengths': [3, 2, 1, 1]},
{'shape': (3,), 'lengths': [4, 2, 1], 'trans_lengths': [3, 2, 1, 1]},
{'shape': (), 'lengths': [0, 0], 'trans_lengths': []},
{'shape': (3,), 'lengths': [4, 2, 0], 'trans_lengths': [2, 2, 1, 1]},
{'shape': (3,), 'lengths': [], 'trans_lengths': []},
)
class TestTransposeSequence(unittest.TestCase):
def setUp(self):
self.xs = [numpy.random.uniform(-1, 1, (length,) + self.shape)
for length in self.lengths]
self.gs = [numpy.random.uniform(-1, 1, (length,) + self.shape)
for length in self.trans_lengths]
def check_forward(self, xs_data):
xs = [chainer.Variable(x) for x in xs_data]
ys = functions.transpose_sequence(xs)
self.assertEqual(len(ys), len(self.trans_lengths))
for y, l in zip(ys, self.trans_lengths):
self.assertEqual(len(y.data), l)
for i, l in enumerate(self.trans_lengths):
for j in six.moves.range(l):
testing.assert_allclose(ys[i].data[j], self.xs[j][i])
def test_forward_cpu(self):
self.check_forward(self.xs)
@attr.gpu
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(x) for x in self.xs])
def check_backward(self, xs_data, gs_data):
# In this situation the function returns no result
if len(self.trans_lengths) == 0:
return
def f(*xs):
return functions.transpose_sequence(xs)
gradient_check.check_backward(
f, tuple(xs_data), tuple(gs_data))
def test_backward_cpu(self):
self.check_backward(self.xs, self.gs)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(x) for x in self.xs],
[cuda.to_gpu(g) for g in self.gs])
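# Illustrative usage sketch, not part of the original test module:
# transpose_sequence turns a list of sequences sorted by decreasing length
# into per-time-step batches; the per-step lengths below correspond to the
# lengths/trans_lengths bookkeeping above. The helper name is hypothetical.
def _example_transpose_sequence_usage():
    xs = [numpy.zeros((n,), dtype=numpy.float32) for n in (4, 2, 1)]
    ys = functions.transpose_sequence(xs)
    assert [len(y.data) for y in ys] == [3, 2, 1, 1]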
testing.run_module(__name__, __file__)
| 2,143 | 30.072464 | 73 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_reshape.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'in_shape': [(4, 3, 2)],
'out_shape': [(2, 2, 6), (2, -1, 6)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
class TestReshape(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expect = x.reshape(self.out_shape)
return y_expect,
def forward(self, inputs, device):
x, = inputs
y = functions.reshape(x, self.out_shape)
return y,
class TestReshapeSkip(unittest.TestCase):
shape = (2, 3)
def setUp(self):
self.data = numpy.random.uniform(0, 1, self.shape)
def test_ndarray(self):
ret = functions.reshape(self.data, self.shape)
self.assertIs(self.data, ret.data)
def test_variable(self):
x = chainer.Variable(self.data)
ret = functions.reshape(x, self.shape)
self.assertIs(x, ret)
testing.run_module(__name__, __file__)
| 1,578 | 22.220588 | 73 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_dstack.py |
import unittest
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3, 4), 'y_shape': (2, 3, 8), 'xs_length': 2},
{'shape': (3, 4), 'y_shape': (3, 4, 2), 'xs_length': 2},
{'shape': (3), 'y_shape': (1, 3, 2), 'xs_length': 2},
{'shape': (), 'y_shape': (1, 1, 2), 'xs_length': 2},
{'shape': (2, 3, 4), 'y_shape': (2, 3, 4), 'xs_length': 1},
{'shape': (3, 4), 'y_shape': (3, 4, 1), 'xs_length': 1},
{'shape': (3), 'y_shape': (1, 3, 1), 'xs_length': 1},
{'shape': (), 'y_shape': (1, 1, 1), 'xs_length': 1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestDstack(testing.FunctionTestCase):
def setUp(self):
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def generate_inputs(self):
xs = tuple([
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
for i in six.moves.range(self.xs_length)
])
return xs
def forward_expected(self, inputs):
xs = inputs
y_expected = numpy.dstack(xs)
return y_expected,
def forward(self, inputs, device):
xs = inputs
y = functions.dstack(xs)
return y,
@testing.parameterize(
{'a_shape': (2, 3, 4, 6), 'b_shape': (2, 3, 4, 5), 'valid': False},
{'a_shape': (2, 3, 5, 6), 'b_shape': (2, 3, 4, 6), 'valid': True},
{'a_shape': (2, 4, 5), 'b_shape': (3, 4, 5), 'valid': False},
{'a_shape': (3, 4, 6), 'b_shape': (3, 4, 5), 'valid': True},
{'a_shape': (3, 6, 5), 'b_shape': (3, 4, 5), 'valid': False},
{'a_shape': (3, 4), 'b_shape': (4, 4), 'valid': False},
{'a_shape': (3, 4), 'b_shape': (3, 3), 'valid': False},
{'a_shape': (3,), 'b_shape': (4,), 'valid': False},
{'a_shape': (3), 'b_shape': (3, 3), 'valid': False},
{'a_shape': (), 'b_shape': (1), 'valid': False},
)
class TestDstackTypeCheck(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.a_shape).astype(numpy.float32),
numpy.random.uniform(-1, 1, self.b_shape).astype(numpy.float32),
]
def check_value_check(self):
if self.valid:
            # Valid shapes must not raise
functions.dstack(self.xs)
else:
with self.assertRaises(type_check.InvalidType):
functions.dstack(self.xs)
def test_value_check_cpu(self):
self.check_value_check()
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check()
testing.run_module(__name__, __file__)
| 3,262 | 28.93578 | 77 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_permutate.py |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import force_array
@testing.parameterize(*testing.product_dict(
[{'shape': (3,), 'dtype': 'f', 'axis': 0, 'inv': False},
{'shape': (3,), 'dtype': 'f', 'axis': -1, 'inv': True},
{'shape': (3, 4), 'dtype': 'd', 'axis': 1, 'inv': True},
{'shape': (3, 4, 5), 'dtype': 'f', 'axis': 2, 'inv': False}],
[{'label_dtype': numpy.int8},
{'label_dtype': numpy.int16},
{'label_dtype': numpy.int32},
{'label_dtype': numpy.int64}]
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestPermutate(testing.FunctionTestCase):
def setUp(self):
self.skip_double_backward_test = True
self.check_backward_options.update({'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
indices = numpy.random.permutation(
self.shape[self.axis]).astype(self.label_dtype)
return x, indices
def forward(self, inputs, device):
x, indices = inputs
y = functions.permutate(x, indices, axis=self.axis, inv=self.inv)
return y,
def forward_expected(self, inputs):
x, indices = inputs
if self.inv:
indices = numpy.argsort(indices)
expected = numpy.take(x, indices, axis=self.axis)
expected = force_array(expected)
return expected,
@testing.parameterize(
{'indices': [0, 0]},
{'indices': [-1, 0]},
{'indices': [0, 2]},
)
class TestPermutateInvalidIndices(unittest.TestCase):
def setUp(self):
self.x = numpy.arange(10).reshape((2, 5)).astype('f')
self.ind = numpy.array(self.indices, 'i')
self.debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.debug)
def check_invalid(self, x_data, ind_data):
x = chainer.Variable(x_data)
ind = chainer.Variable(ind_data)
with self.assertRaises(ValueError):
functions.permutate(x, ind)
    def test_invalid_cpu(self):
self.check_invalid(self.x, self.ind)
@attr.gpu
    def test_invalid_gpu(self):
self.check_invalid(cuda.to_gpu(self.x), cuda.to_gpu(self.ind))
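# Illustrative usage sketch, not part of the original test module: permutate
# reorders x along `axis` by an index permutation; inv=True applies the
# inverse permutation, which forward_expected above emulates with argsort.
# The helper name and values are hypothetical.
def _example_permutate_usage():
    x = numpy.arange(3, dtype=numpy.float32)         # [0, 1, 2]
    ind = numpy.array([2, 0, 1], dtype=numpy.int32)
    y = functions.permutate(x, ind)                  # takes x[[2, 0, 1]]
    assert (y.array == [2., 0., 1.]).all()
    y_inv = functions.permutate(x, ind, inv=True)    # inverse permutation
    assert (y_inv.array == [1., 2., 0.]).all()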
testing.run_module(__name__, __file__)
| 2,760 | 26.888889 | 73 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_stack.py |
import unittest
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
import chainerx
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4), 'axis': 0, 'y_shape': (2, 3, 4)},
{'shape': (3, 4), 'axis': 1, 'y_shape': (3, 2, 4)},
{'shape': (3, 4), 'axis': 2, 'y_shape': (3, 4, 2)},
{'shape': (3, 4), 'axis': -1, 'y_shape': (3, 4, 2)},
{'shape': (3, 4), 'axis': -2, 'y_shape': (3, 2, 4)},
{'shape': (3, 4), 'axis': -3, 'y_shape': (2, 3, 4)},
{'shape': (), 'axis': 0, 'y_shape': (2,)},
{'shape': (), 'axis': -1, 'y_shape': (2,)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
class TestStack(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
]
self.g = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.ggs = [
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype),
]
def check_forward(self, xs_data):
xs = [chainer.Variable(x) for x in xs_data]
y = functions.stack(xs, axis=self.axis)
if hasattr(numpy, 'stack'):
# run test only with numpy>=1.10
expect = numpy.stack(self.xs, axis=self.axis)
testing.assert_allclose(y.data, expect)
y_data = backend.CpuDevice().send(y.data)
self.assertEqual(y_data.shape[self.axis], 2)
numpy.testing.assert_array_equal(
y_data.take(0, axis=self.axis), self.xs[0])
numpy.testing.assert_array_equal(
y_data.take(1, axis=self.axis), self.xs[1])
def test_forward_cpu(self):
self.check_forward(self.xs)
@attr.gpu
def test_forward_gpu(self):
self.check_forward([cuda.to_gpu(x) for x in self.xs])
@attr.chainerx
def test_forward_chainerx(self):
self.check_forward([chainerx.array(x) for x in self.xs])
def check_backward(self, xs_data, g_data):
def func(*xs):
return functions.stack(xs, self.axis)
gradient_check.check_backward(
func, xs_data, g_data, eps=2.0 ** -2, dtype='d')
def test_backward_cpu(self):
self.check_backward(self.xs, self.g)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
[cuda.to_gpu(x) for x in self.xs], cuda.to_gpu(self.g))
@attr.chainerx
def test_backward_chainerx(self):
self.check_backward(
[chainerx.array(x) for x in self.xs], chainerx.array(self.g))
def check_double_backward(self, xs_data, g_data, ggs_data):
def func(*xs):
return functions.stack(xs, self.axis)
gradient_check.check_double_backward(
func, xs_data, g_data, ggs_data, dtype=numpy.float64)
def test_double_backward_cpu(self):
self.check_double_backward(self.xs, self.g, self.ggs)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.xs), cuda.to_gpu(self.g), cuda.to_gpu(self.ggs))
@attr.chainerx
def test_double_backward_chainerx(self):
self.check_double_backward(
backend.to_chx(self.xs),
backend.to_chx(self.g),
backend.to_chx(self.ggs))
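# Illustrative usage sketch, not part of the original test module: stack joins
# equally shaped arrays along a new axis, matching numpy.stack as
# check_forward above verifies. The helper name is hypothetical.
def _example_stack_usage():
    xs = [numpy.zeros((3, 4), dtype=numpy.float32) for _ in range(2)]
    assert functions.stack(xs, axis=0).shape == (2, 3, 4)
    assert functions.stack(xs, axis=-1).shape == (3, 4, 2)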
testing.run_module(__name__, __file__)
| 3,692 | 30.836207 | 77 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_transpose.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'in_shape': [(4, 3, 2)],
'axes': [(-1, 0, 1), None],
    'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestTranspose(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
return x.transpose(self.axes),
def forward(self, inputs, device):
x, = inputs
y = functions.transpose(x, self.axes)
return y,
testing.run_module(__name__, __file__)
| 1,052 | 21.404255 | 73 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_tile.py |
import unittest
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'in_shape': [(), 2, (2, 3)],
'reps': [(), 0, 2, (0, 0), (1, 2), (2, 2), (2, 0)],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestTile(testing.FunctionTestCase):
def setUp(self):
self.check_forward_options = {}
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = ({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options = ({'atol': 2 ** -4, 'rtol': 2 ** -4})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.in_shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y_expected = numpy.tile(x, self.reps)
return y_expected,
def forward(self, inputs, devices):
x, = inputs
y = functions.tile(x, self.reps)
return y,
@testing.parameterize(*testing.product({
'reps': [-1, (-1, -1)],
}))
class TestTileValueError(unittest.TestCase):
def test_value_error(self):
x = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
with self.assertRaises(ValueError):
functions.tile(x, self.reps)
class TestTileTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2,)).astype(numpy.float32)
def test_reps_not_int(self):
with self.assertRaises(TypeError):
functions.tile(self.x, 'a')
def test_x_not_ndarray_or_variable(self):
with self.assertRaises(TypeError):
functions.tile((self.x, self.x), 2)
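# Illustrative usage sketch, not part of the original test module: tile
# repeats x according to reps exactly like numpy.tile, which is what
# forward_expected above compares against. The helper name is hypothetical.
def _example_tile_usage():
    x = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
    assert functions.tile(x, 2).shape == (2, 6)       # scalar reps: last axis
    assert functions.tile(x, (2, 1)).shape == (4, 3)  # per-axis repetitions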
testing.run_module(__name__, __file__)
| 2,118 | 24.841463 | 78 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_moveaxis.py |
import unittest
import numpy
import six
from chainer import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
def _normalize_axis_tuple(axis, ndim):
if numpy.isscalar(axis):
axis = (axis,)
ret = []
for ax in axis:
ret.append(ax % ndim)
return ret
def _moveaxis(a, source, destination):
if hasattr(numpy, 'moveaxis'):
return numpy.moveaxis(a, source, destination)
source = _normalize_axis_tuple(source, a.ndim)
destination = _normalize_axis_tuple(destination, a.ndim)
order = [n for n in six.moves.range(a.ndim) if n not in source]
for dest, src in sorted(six.moves.zip(destination, source)):
order.insert(dest, src)
result = a.transpose(order)
return result
@testing.parameterize(
{'source': 0, 'destination': -1, 'out_shape': (3, 4, 2)},
{'source': -1, 'destination': 1, 'out_shape': (2, 4, 3)},
{'source': (0, 2), 'destination': (1, 0), 'out_shape': (4, 2, 3)},
{'source': (0, -1), 'destination': (-1, 1), 'out_shape': (3, 4, 2)},
)
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestMoveaxis(testing.FunctionTestCase):
dtype = numpy.float32
def setUp(self):
self.check_backward_options = {}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
y = functions.moveaxis(x, self.source, self.destination)
return y,
def forward_expected(self, inputs):
x, = inputs
y_expect = _moveaxis(x, self.source, self.destination)
return y_expect,
@testing.parameterize(
{'source': 4, 'destination': 0},
{'source': 0, 'destination': 4},
{'source': 0, 'destination': -4},
{'source': -4, 'destination': 0},
)
class TestMoveaxisInvalidType(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.moveaxis(x, self.source, self.destination)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
@testing.parameterize(
{'source': (1, 2), 'destination': (1, 2, 0)},
{'source': (0, 0), 'destination': (1, 2)},
{'source': (0, 1), 'destination': (2, 2)},
)
class TestMoveaxisValueError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(ValueError):
functions.moveaxis(x, self.source, self.destination)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
@testing.parameterize(
{'source': (1, 2), 'destination': (1, 2.0)},
{'source': (1, 2.0), 'destination': (1, 2)},
)
class TestMoveaxisTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(TypeError):
functions.moveaxis(x, self.source, self.destination)
    # numpy==1.11 has a bug that accepts float axis values, so the CPU test
    # is skipped on that version.
@testing.with_requires('numpy!=1.11.*')
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
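# Illustrative usage sketch, not part of the original test module: moveaxis
# relocates axes from `source` to `destination` positions; the pure-NumPy
# fallback of the same operation is spelled out in the _moveaxis helper above.
# The helper name is hypothetical.
def _example_moveaxis_usage():
    x = numpy.zeros((2, 3, 4), dtype=numpy.float32)
    assert functions.moveaxis(x, 0, -1).shape == (3, 4, 2)
    assert functions.moveaxis(x, (0, 2), (1, 0)).shape == (4, 2, 3)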
testing.run_module(__name__, __file__)
| 4,143 | 25.909091 | 73 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_flatten.py |
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product({
'shape': [(3, 4), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFlatten(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
return x.flatten(),
def forward(self, inputs, device):
x, = inputs
y = functions.flatten(x)
return y,
testing.run_module(__name__, __file__)
| 989 | 20.521739 | 70 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/__init__.py |
| 0 | 0 | 0 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_scatter_add.py |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[{'slices': (0, slice(0, 1), numpy.array(-1)), 'b_data': numpy.array([1])},
{'slices': (slice(None), 0, [0, 2]),
'b_data': numpy.random.uniform(size=(4, 2))},
{'slices': ([1, 0], [0, 0], [2, 0]),
'b_data': numpy.random.uniform(size=(2,))},
{'slices': 1, 'b_data': numpy.random.uniform(size=(2, 3))},
{'slices': numpy.array([False, True, False, True]),
'b_data': numpy.random.uniform(size=(2, 2, 3))},
{'slices': [], 'b_data': numpy.empty(shape=(0, 2, 3))},
]
))
@testing.fix_random()
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestScatterAdd(testing.FunctionTestCase):
def setUp(self):
self.check_backward_options.update({'atol': 5e-4, 'rtol': 5e-4})
self.check_double_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3})
def generate_inputs(self):
a = numpy.random.uniform(-1, 1, (4, 2, 3)).astype(self.dtype)
b = self.b_data.astype(self.dtype)
return a, b
def forward(self, inputs, device):
a, b = inputs
y = functions.scatter_add(a, self.slices, b)
return y,
def forward_expected(self, inputs):
a, b = inputs
a_copy = a.copy()
numpy.add.at(a_copy, self.slices, b)
return a_copy,
class TestInvalidScatterAdd(unittest.TestCase):
def setUp(self):
self.default_debug = chainer.is_debug()
chainer.set_debug(True)
self.a_data = numpy.random.uniform(-1, 1, (4, 3, 2))
self.b_data = numpy.random.uniform(-1, 1, (2, 2))
def tearDown(self):
chainer.set_debug(self.default_debug)
def test_multiple_ellipsis(self):
with self.assertRaises(ValueError):
functions.scatter_add(
self.a_data, (Ellipsis, Ellipsis), self.b_data)
def test_too_many_indices(self):
with self.assertRaises(type_check.InvalidType):
functions.scatter_add(self.a_data, (0, 0, 0, 0), self.b_data)
def test_requires_broadcasting(self):
with self.assertRaises(ValueError):
functions.scatter_add(self.a_data, slice(0, 2), self.b_data)
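# Illustrative usage sketch, not part of the original test module: scatter_add
# adds `b` into `a` at the indexed positions with numpy.add.at semantics
# (repeated indices accumulate), as forward_expected above emulates. The
# helper name and values are hypothetical.
def _example_scatter_add_usage():
    a = numpy.zeros((4, 2, 3), dtype=numpy.float32)
    b = numpy.ones((2,), dtype=numpy.float32)
    y = functions.scatter_add(a, ([1, 0], [0, 0], [2, 0]), b)
    assert y.array[1, 0, 2] == 1 and y.array[0, 0, 0] == 1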
testing.run_module(__name__, __file__)
| 2,786 | 27.731959 | 79 | py | chainer | chainer-master/tests/chainer_tests/functions_tests/array_tests/test_as_strided.py |
import unittest
import numpy as np
import chainer
from chainer import cuda
import chainer.functions as F
from chainer.functions.array.as_strided import _stride_array
from chainer import testing
def _broadcast_to(xp, x, shape):
if hasattr(xp, 'broadcast_to'):
return xp.broadcast_to(x, shape)
else:
dummy = xp.empty(shape)
return xp.broadcast_arrays(x, dummy)[0]
@testing.parameterize(
{'dtype': np.float16},
{'dtype': np.float32},
{'dtype': np.float64},
{'dtype': np.int16},
{'dtype': np.int32},
{'dtype': np.int64}
)
class TestStrideArray(unittest.TestCase):
def check_flip(self, xp):
x = xp.arange(4, dtype=self.dtype)
y = _stride_array(x, (4,), (-1,), 3) # [3, 2, 1, 0]
y_expected = x[::-1]
testing.assert_allclose(y, y_expected)
def test_flip_cpu(self):
self.check_flip(np)
@testing.attr.gpu
def test_flip_gpu(self):
self.check_flip(cuda.cupy)
def check_broadcast(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
y = _stride_array(x, (2, 3, 4), (0, 4, 1), 0)
y_expected = _broadcast_to(xp, x, (2, 3, 4))
testing.assert_allclose(y, y_expected)
def test_broadcast_cpu(self):
self.check_broadcast(np)
@testing.attr.gpu
def test_broadcast_gpu(self):
self.check_broadcast(cuda.cupy)
def check_unstride(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
y = _stride_array(x, (12,), (1,), 0)
y_expected = xp.arange(12, dtype=self.dtype)
testing.assert_allclose(y, y_expected)
def test_unstride_cpu(self):
self.check_unstride(np)
@testing.attr.gpu
def test_unstride_gpu(self):
self.check_unstride(cuda.cupy)
def check_general_stride(self, xp):
x = xp.arange(8, dtype=self.dtype)
y = _stride_array(x, (3, 3), (-1, 2), 3)
y_expected = xp.array(
[[3, 5, 7],
[2, 4, 6],
[1, 3, 5]],
dtype=self.dtype
)
testing.assert_allclose(y, y_expected)
def test_general_stride_cpu(self):
self.check_general_stride(np)
@testing.attr.gpu
def test_general_stride_gpu(self):
self.check_general_stride(cuda.cupy)
def check_invalid_negative_index(self, xp):
x = xp.arange(8, dtype=self.dtype)
with self.assertRaises(ValueError):
_stride_array(x, (3, 3), (-1, 2), 1)
def test_invalid_negative_index_cpu(self):
self.check_invalid_negative_index(np)
@testing.attr.gpu
def test_invalid_negative_index_gpu(self):
self.check_invalid_negative_index(cuda.cupy)
@testing.parameterize(
{'dtype': np.float16},
{'dtype': np.float32},
{'dtype': np.float64},
{'dtype': np.int16},
{'dtype': np.int32},
{'dtype': np.int64}
)
class TestAsStridedForward(unittest.TestCase):
def check_flip_forward(self, xp):
x = xp.arange(4, dtype=self.dtype)
v = chainer.Variable(x)
y = F.as_strided(v, (4,), (-1,), 3)
y_expected = x[::-1]
testing.assert_allclose(y.array, y_expected)
def test_flip_forward_cpu(self):
self.check_flip_forward(np)
@testing.attr.gpu
def test_flip_forward_gpu(self):
self.check_flip_forward(cuda.cupy)
def check_broadcast_forward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
v = chainer.Variable(x)
y = F.as_strided(v, (2, 3, 4), (0, 4, 1), 0)
y_expected = _broadcast_to(xp, x, (2, 3, 4))
testing.assert_allclose(y.array, y_expected)
def test_broadcast_forward_cpu(self):
self.check_broadcast_forward(np)
@testing.attr.gpu
def test_broadcast_forward_gpu(self):
self.check_broadcast_forward(cuda.cupy)
def check_unstride_forward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
v = chainer.Variable(x)
y = F.as_strided(v, (12,), (1,), 0)
y_expected = xp.arange(12, dtype=self.dtype)
testing.assert_allclose(y.array, y_expected)
def test_unstride_forward_cpu(self):
self.check_unstride_forward(np)
@testing.attr.gpu
def test_unstride_forward_gpu(self):
self.check_unstride_forward(cuda.cupy)
def check_general_stride(self, xp):
x = _stride_array(xp.arange(8, dtype=self.dtype), (3, 3), (-1, 2), 3)
# [[3., 5., 7.], [2., 4., 6.], [1., 3., 5.]]
v = chainer.Variable(x)
y = F.as_strided(v, (3, 3), (1, 2), 0)
        # [[0., 2., 4.], [1., 3., 5.], [2., 4., 6.]]
y_expected = _stride_array(xp.arange(8, dtype=self.dtype),
(3, 3), (1, 2), 0)
assert (y.array == y_expected).all()
def test_general_stride_forward_cpu(self):
self.check_general_stride(np)
@testing.attr.gpu
def test_general_stride_forward_gpu(self):
self.check_general_stride(cuda.cupy)
@testing.parameterize(
{'dtype': np.float16},
{'dtype': np.float32},
{'dtype': np.float64}
)
class TestAsStridedBackward(unittest.TestCase):
def check_flip_backward(self, xp):
x = xp.arange(4, dtype=self.dtype)
v = chainer.Variable(x)
y = F.as_strided(v, (4,), (-1,), 3)
y.grad = xp.ones((4,), dtype=self.dtype)
gx, = chainer.grad((y,), (v,))
testing.assert_allclose(gx.array, xp.ones((4,), dtype=self.dtype))
def test_flip_backward_cpu(self):
self.check_flip_backward(np)
@testing.attr.gpu
def test_flip_backward_gpu(self):
self.check_flip_backward(cuda.cupy)
def check_broadcast_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
v = chainer.Variable(x)
y = F.as_strided(v, (2, 3, 4), (0, 4, 1), 0)
y.grad = xp.ones((2, 3, 4), dtype=self.dtype)
gx, = chainer.grad((y,), (v,))
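        # The leading axis of length 2 was created purely by broadcasting
        # (stride 0), so each input element receives two gradient
        # contributions, giving an expected gradient of 2 everywhere.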
testing.assert_allclose(gx.array,
xp.ones(x.shape, dtype=self.dtype) * 2)
def test_broadcast_backward_cpu(self):
self.check_broadcast_backward(np)
@testing.attr.gpu
def test_broadcast_backward_gpu(self):
self.check_broadcast_backward(cuda.cupy)
def check_unstride_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
v = chainer.Variable(x)
y = F.as_strided(v, (12,), (1,), 0)
y.grad = xp.ones((12,), dtype=self.dtype)
gx, = chainer.grad((y,), (v,))
testing.assert_allclose(gx.array, xp.ones(x.shape, dtype=self.dtype))
def test_unstride_backward_cpu(self):
self.check_unstride_backward(np)
@testing.attr.gpu
def test_unstride_backward_gpu(self):
self.check_unstride_backward(cuda.cupy)
def check_general_stride_backward(self, xp):
x = _stride_array(xp.arange(8, dtype=self.dtype), (3, 3), (-1, 2), 3)
# [[3., 5., 7.], [2., 4., 6.], [1., 3., 5.]]
v = chainer.Variable(x)
y = F.as_strided(v, (3, 3), (1, 2), 0)
        # [[0., 2., 4.], [1., 3., 5.], [2., 4., 6.]]
y.grad = xp.ones(y.shape, dtype=self.dtype)
gx, = chainer.grad((y,), (v,))
testing.assert_allclose(gx.array,
xp.array([
[0.5, 0.5, 0.],
[2., 2., 1.],
[1., 0.5, 0.5]
], dtype=self.dtype)
)
def test_general_stride_backward_cpu(self):
self.check_general_stride_backward(np)
@testing.attr.gpu
def test_general_stride_backward_gpu(self):
self.check_general_stride_backward(cuda.cupy)
@testing.parameterize(
{'dtype': np.int16},
{'dtype': np.int32},
{'dtype': np.int64}
)
class TestAsStridedBackwardInvalidType(unittest.TestCase):
def check_flip_backward(self, xp):
x = xp.arange(4, dtype=self.dtype)
v = chainer.Variable(x)
y = F.as_strided(v, (4,), (-1,), 3)
y.grad = xp.ones((4,), dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = chainer.grad((y,), (v,))
def test_flip_backward_cpu(self):
self.check_flip_backward(np)
@testing.attr.gpu
def test_flip_backward_gpu(self):
self.check_flip_backward(cuda.cupy)
def check_broadcast_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4)).copy()
v = chainer.Variable(x)
y = F.as_strided(v, (2, 3, 4), (0, 4, 1), 0)
y.grad = xp.ones((2, 3, 4), dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = chainer.grad((y,), (v,))
def test_broadcast_backward_cpu(self):
self.check_broadcast_backward(np)
@testing.attr.gpu
def test_broadcast_backward_gpu(self):
self.check_broadcast_backward(cuda.cupy)
def check_unstride_backward(self, xp):
x = xp.arange(12, dtype=self.dtype).reshape((3, 4))[::-1]
v = chainer.Variable(x)
y = F.as_strided(v, (12,), (1,), 0)
y.grad = xp.ones((12,), dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = chainer.grad((y,), (v,))
def test_unstride_backward_cpu(self):
self.check_unstride_backward(np)
@testing.attr.gpu
def test_unstride_backward_gpu(self):
self.check_unstride_backward(cuda.cupy)
def check_general_stride_backward(self, xp):
x = _stride_array(xp.arange(8, dtype=self.dtype), (3, 3), (-1, 2), 3)
# [[3., 5., 7.], [2., 4., 6.], [1., 3., 5.]]
v = chainer.Variable(x)
y = F.as_strided(v, (3, 3), (1, 2), 0)
        # [[0., 2., 4.], [1., 3., 5.], [2., 4., 6.]]
y.grad = xp.ones(y.shape, dtype=self.dtype)
with self.assertRaises(TypeError):
gx, = chainer.grad((y,), (v,))
def test_general_stride_backward_cpu(self):
self.check_general_stride_backward(np)
@testing.attr.gpu
def test_general_stride_backward_gpu(self):
self.check_general_stride_backward(cuda.cupy)
testing.run_module(__name__, __file__)
| 10,239
| 31.507937
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_copy.py
|
import unittest
import numpy
import pytest
import chainer
from chainer import backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer import functions
from chainer import testing
import chainerx
def _to_gpu(x, device_id):
if device_id >= 0:
return cuda.to_gpu(x, device_id)
else:
return x
_nonchainerx_backend_configs = (
[
# NumPy
{},
# CuPy
{'use_cuda': True, 'cuda_device': 0},
{'use_cuda': True, 'cuda_device': 1},
])
_chainerx_backend_configs = (
[
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
_backend_configs = _nonchainerx_backend_configs + _chainerx_backend_configs
_numpy_device = chainer.get_device('@numpy')
class CopyTestBase(object):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype)
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def _check_forward_internal(
self, dst_device_spec, src_device, dst_device, x_mode):
x = src_device.send(self.x)
if x_mode == 'array':
pass
elif x_mode == 'non_requires_grad':
x = chainer.Variable(x, requires_grad=False)
elif x_mode == 'requires_grad':
x = chainer.Variable(x, requires_grad=True)
else:
assert False, x_mode
error_expected = (
(src_device.xp is chainerx) != (dst_device.xp is chainerx)
and x_mode == 'requires_grad')
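        # Transferring a variable that requires grad between ChainerX and
        # non-ChainerX devices is expected to be rejected, presumably because
        # such a transfer cannot be backpropagated (see also the SkipTest in
        # test_backward below).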
if error_expected:
with pytest.raises(RuntimeError):
functions.copy(x, dst_device_spec)
return
y = functions.copy(x, dst_device_spec)
assert y.device == dst_device
assert backend.get_device_from_array(y.array) == dst_device
assert y.dtype == self.dtype
numpy.testing.assert_array_equal(_numpy_device.send(y.array), self.x)
def check_forward(
self, dst_device_spec, src_device_spec, dst_device):
self._check_forward_internal(
dst_device_spec, src_device_spec, dst_device, 'array')
self._check_forward_internal(
dst_device_spec, src_device_spec, dst_device, 'non_requires_grad')
self._check_forward_internal(
dst_device_spec, src_device_spec, dst_device, 'requires_grad')
def test_forward(self, src_backend_config, dst_backend_config):
self.check_forward(
dst_backend_config.device,
src_backend_config.device,
dst_backend_config.device)
def test_backward(self, src_backend_config, dst_backend_config):
src_device = src_backend_config.device
dst_device = dst_backend_config.device
if (src_device.xp is chainerx) is not (dst_device.xp is chainerx):
raise unittest.SkipTest(
'ChainerX to non-ChainerX does not support backward.')
x = src_backend_config.get_array(self.x)
gy = dst_backend_config.get_array(self.gy)
x_var = chainer.Variable(x, requires_grad=True)
y_var = functions.copy(x_var, dst_device)
y_var.grad = gy
y_var.backward()
x_grad = x_var.grad
assert x_var.grad_var.device == src_device
assert backend.get_device_from_array(x_grad) == src_device
numpy.testing.assert_array_equal(_numpy_device.send(x_grad), self.gy)
def test_double_backward(self, src_backend_config, dst_backend_config):
src_device = src_backend_config.device
dst_device = dst_backend_config.device
if (src_device.xp is chainerx) is not (dst_device.xp is chainerx):
raise unittest.SkipTest(
'ChainerX to non-ChainerX does not support backward.')
x = src_backend_config.get_array(self.x)
gy = dst_backend_config.get_array(self.gy)
ggx = src_backend_config.get_array(self.ggx)
x_var = chainer.Variable(x, requires_grad=True)
y_var = functions.copy(x_var, dst_device)
y_var.grad = gy
gy_var = y_var.grad_var
y_var.backward(enable_double_backprop=True)
assert x_var.grad_var.requires_grad is True
x_var.grad_var.grad = ggx
x_var.grad_var.backward()
assert gy_var.grad_var.device == dst_device
assert (
backend.get_device_from_array(gy_var.grad_var.array)
== dst_device)
numpy.testing.assert_array_equal(
_numpy_device.send(gy_var.grad_var.array), self.ggx)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs) # dst
@testing.inject_backend_tests(None, _backend_configs) # src
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestCopyToNonChainerx(CopyTestBase, unittest.TestCase):
def test_forward_int(self, src_backend_config, dst_backend_config):
assert dst_backend_config.xp is not chainerx
src_device = src_backend_config.device
dst_device = dst_backend_config.device
if dst_device.xp is numpy:
dst_device_spec = -1
elif dst_device.xp is chainer.backends.cuda.cupy:
dst_device_spec = dst_device.device.id
else:
assert False, dst_device
self.check_forward(
dst_device_spec,
src_device,
dst_device)
def test_forward_str(self, src_backend_config, dst_backend_config):
assert dst_backend_config.xp is not chainerx
src_device = src_backend_config.device
dst_device = dst_backend_config.device
if dst_device.xp is numpy:
dst_device_spec = '@numpy'
elif dst_device.xp is chainer.backends.cuda.cupy:
dst_device_spec = '@cupy:{}'.format(dst_device.device.id)
else:
assert False, dst_device
self.check_forward(
dst_device_spec,
src_device,
dst_device)
@testing.inject_backend_tests(None, _chainerx_backend_configs) # dst
@testing.inject_backend_tests(None, _backend_configs) # src
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestCopyToChainerx(CopyTestBase, unittest.TestCase):
def test_forward_str(self, src_backend_config, dst_backend_config):
assert dst_backend_config.xp is chainerx
src_device = src_backend_config.device
dst_device = dst_backend_config.device
dst_device_spec = dst_device.device.name
self.check_forward(
dst_device_spec,
src_device,
dst_device)
@testing.inject_backend_tests(None, _chainerx_backend_configs)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
class TestCopyBetweenChainerxAndNonChainerx(unittest.TestCase):
    # Copying between non-ChainerX and ChainerX devices is not supported.
dtype = numpy.float32
def check_invalid(self, src_device, dst_device_spec):
x = src_device.send(
numpy.random.uniform(-1, 1, (10, 5)).astype(self.dtype))
x_var = chainer.Variable(x)
with pytest.raises(RuntimeError):
functions.copy(x_var, dst_device_spec)
def test_invalid(self, nonchx_backend_config, chx_backend_config):
assert nonchx_backend_config.xp is not chainerx
assert chx_backend_config.xp is chainerx
self.check_invalid(
nonchx_backend_config.device, chx_backend_config.device)
self.check_invalid(
chx_backend_config.device, nonchx_backend_config.device)
# cuda.DummyDevice is not supported either.
self.check_invalid(
chx_backend_config.device, cuda.DummyDevice)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
@testing.inject_backend_tests(None, _nonchainerx_backend_configs)
class TestCopyCudaDummyDevice(unittest.TestCase):
def test_dummy_device(self, src_backend_config, current_backend_config):
x_arr = src_backend_config.get_array(numpy.zeros((2, 3)))
with current_backend_config:
y = functions.copy(x_arr, cuda.DummyDevice)
# Always transferred to NumPy device, regardless of the current CUDA
# device.
assert isinstance(y.device, _cpu.CpuDevice)
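        # Interpretation (not asserted by this test as an API guarantee):
        # passing ``cuda.DummyDevice`` as the destination behaves like
        # requesting ``'@numpy'``, i.e. the host CPU.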
testing.run_module(__name__, __file__)
| 8,705
| 32.744186
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_flip.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestFlip(testing.FunctionTestCase):
def setUp(self):
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
flip_func = getattr(numpy, 'flip', functions.array.flip._flip)
y_expected = flip_func(x, self.axis)
return y_expected,
def forward(self, inputs, devices):
x, = inputs
y = functions.flip(x, self.axis)
return y,
@testing.parameterize(
{'axis': 3},
{'axis': -4},
)
class TestFlipInvalidTypeAxis(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def check_type_error(self, x):
with self.assertRaises(type_check.InvalidType):
functions.flip(x, self.axis)
def test_type_error_cpu(self):
self.check_type_error(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_type_error(cuda.to_gpu(self.x))
class TestFlipInvalidTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (2, 3, 4)).astype('f')
def test_invalid_axis(self):
with self.assertRaises(TypeError):
functions.flip(self.x, 'a')
@testing.parameterize(*testing.product_dict(
[
{'shape': (1,), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 0},
{'shape': (2, 3, 4), 'axis': 1},
{'shape': (2, 3, 4), 'axis': 2},
{'shape': (2, 3, 4), 'axis': -3},
{'shape': (2, 3, 4), 'axis': -2},
{'shape': (2, 3, 4), 'axis': -1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
@testing.with_requires('numpy>=1.12.0')
class TestFlipFunction(unittest.TestCase):
def test_equal_to_numpy_flip(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
numpy.testing.assert_array_equal(
functions.array.flip._flip(x, self.axis),
numpy.flip(x, self.axis))
testing.run_module(__name__, __file__)
| 3,218
| 24.752
| 73
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_hstack.py
|
import unittest
import numpy
import six
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3, 4), 'y_shape': (2, 6, 4), 'xs_length': 2},
{'shape': (3, 4), 'y_shape': (3, 8), 'xs_length': 2},
{'shape': (3), 'y_shape': (6,), 'xs_length': 2},
{'shape': (), 'y_shape': (2,), 'xs_length': 2},
{'shape': (2, 3, 4), 'y_shape': (2, 3, 4), 'xs_length': 1},
{'shape': (3, 4), 'y_shape': (3, 4), 'xs_length': 1},
{'shape': (3), 'y_shape': (3,), 'xs_length': 1},
{'shape': (), 'y_shape': (1,), 'xs_length': 1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
{'use_ideep': 'always'},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
])
class TestHstack(testing.FunctionTestCase):
def generate_inputs(self):
return tuple([
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
for i in six.moves.range(self.xs_length)])
def forward(self, inputs, device):
y = functions.hstack(inputs)
return y,
def forward_expected(self, inputs):
y = numpy.hstack(inputs)
return y,
@testing.parameterize(
{'a_shape': (2, 4, 5), 'b_shape': (3, 4, 5), 'valid': False},
{'a_shape': (3, 4, 6), 'b_shape': (3, 4, 5), 'valid': False},
{'a_shape': (3, 6, 5), 'b_shape': (3, 4, 5), 'valid': True},
{'a_shape': (3, 4), 'b_shape': (4, 4), 'valid': False},
{'a_shape': (3, 4), 'b_shape': (3, 3), 'valid': True},
{'a_shape': (3,), 'b_shape': (4,), 'valid': True},
{'a_shape': (3), 'b_shape': (3, 3), 'valid': False},
)
class TestHstackTypeCheck(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.a_shape).astype(numpy.float32),
numpy.random.uniform(-1, 1, self.b_shape).astype(numpy.float32),
]
def check_value_check(self):
if self.valid:
            # Check that it does not raise
functions.hstack(self.xs)
else:
with self.assertRaises(type_check.InvalidType):
functions.hstack(self.xs)
def test_value_check_cpu(self):
self.check_value_check()
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check()
testing.run_module(__name__, __file__)
| 2,840
| 28.28866
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_vstack.py
|
import unittest
import numpy
import pytest
import six
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 3, 4), 'y_shape': (4, 3, 4), 'xs_length': 2},
{'shape': (3, 4), 'y_shape': (6, 4), 'xs_length': 2},
{'shape': (3), 'y_shape': (2, 3), 'xs_length': 2},
{'shape': (), 'y_shape': (2, 1), 'xs_length': 2},
{'shape': (2, 3, 4), 'y_shape': (2, 3, 4), 'xs_length': 1},
{'shape': (3, 4), 'y_shape': (3, 4), 'xs_length': 1},
{'shape': (3), 'y_shape': (1, 3), 'xs_length': 1},
{'shape': (), 'y_shape': (1, 1), 'xs_length': 1},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestVstack(testing.FunctionTestCase):
def setup(self):
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_double_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def generate_inputs(self):
xs = tuple(
numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
for _ in six.moves.range(self.xs_length)
)
return xs
def forward_expected(self, inputs):
x = list(inputs)
y_expect = numpy.vstack(x)
return y_expect,
def forward(self, inputs, device):
x = list(inputs)
y = functions.vstack(x)
return y,
@testing.parameterize(
{'a_shape': (2, 4, 5), 'b_shape': (3, 4, 5), 'valid': True},
{'a_shape': (3, 4, 6), 'b_shape': (3, 4, 5), 'valid': False},
{'a_shape': (3, 6, 5), 'b_shape': (3, 4, 5), 'valid': False},
{'a_shape': (3, 4), 'b_shape': (4, 4), 'valid': True},
{'a_shape': (3, 4), 'b_shape': (3, 3), 'valid': False},
{'a_shape': (3,), 'b_shape': (4,), 'valid': False},
{'a_shape': (3), 'b_shape': (3, 3), 'valid': False},
{'a_shape': (), 'b_shape': (1), 'valid': False},
)
class TestVstackTypeCheck(unittest.TestCase):
def setUp(self):
self.xs = [
numpy.random.uniform(-1, 1, self.a_shape).astype(numpy.float32),
numpy.random.uniform(-1, 1, self.b_shape).astype(numpy.float32),
]
def check_value_check(self):
if self.valid:
            # Check that it does not raise
functions.vstack(self.xs)
else:
with pytest.raises(type_check.InvalidType):
functions.vstack(self.xs)
def test_value_check_cpu(self):
self.check_value_check()
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check()
testing.run_module(__name__, __file__)
| 3,112
| 27.824074
| 77
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_resize_images.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)],
'mode': ['bilinear', 'nearest'],
'align_corners': [True, False],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestResizeImagesForwardIdentity(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
return x,
def forward_expected(self, inputs):
x, = inputs
return x,
def forward(self, inputs, device):
x, = inputs
output_shape = self.in_shape[2:]
y = functions.resize_images(
x, output_shape,
mode=self.mode, align_corners=self.align_corners)
return y,
@testing.parameterize(*testing.product({
'in_shape': [(2, 2, 4, 4)],
'output_shape': [(2, 2, 2, 2)],
'mode': ['bilinear', 'nearest'],
'align_corners': [True, False],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestResizeImagesForwardDownScale(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.zeros(self.in_shape, dtype=numpy.float32)
x[:, :, :2, :2] = 1
x[:, :, 2:, :2] = 2
x[:, :, :2, 2:] = 3
x[:, :, 2:, 2:] = 4
return x,
def forward_expected(self, inputs):
y_expect = numpy.zeros(self.output_shape, dtype=numpy.float32)
y_expect[:, :, 0, 0] = 1
y_expect[:, :, 1, 0] = 2
y_expect[:, :, 0, 1] = 3
        y_expect[:, :, 1, 1] = 4
return y_expect,
def forward(self, inputs, device):
x, = inputs
output_shape = self.output_shape[2:]
y = functions.resize_images(
x, output_shape,
mode=self.mode, align_corners=self.align_corners)
return y,
@testing.parameterize(*testing.product({
'in_shape': [(1, 1, 2, 2)],
'output_shape': [(1, 1, 3, 3)],
'align_corners': [True, False],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestResizeImagesForwardUpScaleBilinear(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.zeros(self.in_shape, dtype=numpy.float32)
x[:, :, 0, 0] = 1
x[:, :, 1, 0] = 2
x[:, :, 0, 1] = 3
x[:, :, 1, 1] = 4
return x,
def forward_expected(self, inputs):
y_expect = numpy.zeros(self.output_shape, dtype=numpy.float32)
y_expect[0, 0, :, :] = numpy.array(
[[1., 2., 3.],
[1.5, 2.5, 3.5],
[2., 3., 4.]],
dtype=numpy.float32)
return y_expect,
def forward(self, inputs, device):
x, = inputs
output_shape = self.output_shape[2:]
y = functions.resize_images(
x, output_shape, align_corners=self.align_corners)
return y,
class TestResizeImagesForwardMultiLinesAlignCorners(unittest.TestCase):
in_shape = (1, 1, 987, 123)
output_shape = (1, 1, 765, 345)
def setUp(self):
self.x = numpy.arange(numpy.prod(self.in_shape), dtype=numpy.float32)
self.x = self.x.reshape(self.in_shape)
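        # The input is an ``arange`` reshaped to (987, 123), so pixel values
        # form the linear ramp ``row * 123 + col``.  Bilinear interpolation
        # reproduces a linear ramp exactly, so the expected output can be
        # built from the two ``linspace`` axes below and summed via
        # broadcasting.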
out_row = numpy.linspace(0, 123 - 1, 345, dtype=numpy.float32)
out_col = numpy.linspace(0, (987 - 1) * 123, 765, dtype=numpy.float32)
self.out = (out_row + out_col[:, None]).reshape(self.output_shape)
def check_forward(self, x, output_shape):
y = functions.resize_images(x, output_shape)
testing.assert_allclose(y.data, self.out)
def test_forward_cpu(self):
self.check_forward(self.x, output_shape=self.output_shape[2:])
@attr.gpu
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), output_shape=self.output_shape[2:])
@testing.parameterize(*testing.product({
'in_shape': [(2, 3, 8, 6), (2, 1, 4, 6)],
'output_shape': [(10, 5), (3, 4)],
'mode': ['bilinear', 'nearest'],
'align_corners': [False, True],
}))
class TestResizeImagesBackward(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
output_shape_4d = self.in_shape[:2] + self.output_shape
self.gy = numpy.random.uniform(
size=output_shape_4d).astype(numpy.float32)
self.ggx = numpy.random.uniform(
size=self.in_shape).astype(numpy.float32)
def check_backward(self, x, output_shape, gy):
def f(x):
return functions.resize_images(
x, output_shape,
mode=self.mode, align_corners=self.align_corners)
gradient_check.check_backward(
f, x, gy, dtype='d', atol=1e-2, rtol=1e-3, eps=1e-5)
def test_backward_cpu(self):
self.check_backward(self.x, self.output_shape, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), self.output_shape,
cuda.to_gpu(self.gy))
def check_double_backward(self, x, output_shape, gy, ggx):
def f(x):
return functions.resize_images(
x, output_shape,
mode=self.mode, align_corners=self.align_corners)
gradient_check.check_double_backward(
f, x, gy, ggx, atol=1e-2, rtol=1e-3)
def test_double_backward_cpu(self):
self.check_double_backward(
self.x, self.output_shape, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.x), self.output_shape,
cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
| 6,798
| 27.809322
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_where.py
|
import unittest
import numpy
import pytest
import chainer
from chainer import functions
from chainer import testing
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'shape': [
# c, x, y, output
((3, 2, 4),) * 4,
((4,), (3, 1, 1), (2, 1), (3, 2, 4)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestWhere(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_backward_options.update({
'atol': 1e-3, 'rtol': 1e-3,
})
def generate_inputs(self):
c_shape, x_shape, y_shape, out_shape = self.shape
c = numpy.random.uniform(-1, 1, c_shape) > 0
x = numpy.random.uniform(-1, 1, x_shape).astype(self.dtype)
y = numpy.random.uniform(-1, 1, y_shape).astype(self.dtype)
return c, x, y
def forward_expected(self, inputs):
c, x, y = inputs
z_expected = numpy.where(c, x, y)
return z_expected,
def forward(self, inputs, devices):
c, x, y = inputs
z = functions.where(c, x, y)
return z,
class TestWhereTypeCheck(unittest.TestCase):
def check_forward_raises(self, c_data, x_data, y_data):
c = chainer.Variable(c_data)
x = chainer.Variable(x_data)
y = chainer.Variable(y_data)
with pytest.raises(type_check.InvalidType):
functions.where(c, x, y)
def test_cond_int(self):
c_data = numpy.zeros(3, numpy.int32)
x_data = numpy.zeros(3, numpy.float32)
y_data = numpy.zeros(3, numpy.float32)
self.check_forward_raises(c_data, x_data, y_data)
def test_xy_precision(self):
c_data = numpy.zeros(3, numpy.bool_)
x_data = numpy.zeros(3, numpy.float32)
y_data = numpy.zeros(3, numpy.float64)
self.check_forward_raises(c_data, x_data, y_data)
def test_shape(self):
c_data = numpy.zeros(3, numpy.bool_)
x_data = numpy.zeros(1, numpy.float32)
y_data = numpy.zeros(2, numpy.float32)
self.check_forward_raises(c_data, x_data, y_data)
testing.run_module(__name__, __file__)
| 2,560
| 26.537634
| 67
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_diagonal.py
|
import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (2, 4, 6), 'args': (1, 2, 0)},
{'shape': (2, 4, 6), 'args': (-1, 2, 0)},
{'shape': (2, 4, 6), 'args': (0, -1, -2)},
{'shape': (2, 4, 6), 'args': (0, -1, 1)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
@testing.inject_backend_tests(
None,
# CPU tests
testing.product({
'use_ideep': ['never', 'always'],
})
# GPU tests
+ [{'use_cuda': True}]
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
class TestDiagonal(testing.FunctionTestCase):
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
return x,
def forward(self, inputs, device):
x, = inputs
return functions.diagonal(x, *self.args),
def forward_expected(self, inputs):
x, = inputs
return x.diagonal(*self.args),
testing.run_module(__name__, __file__)
| 1,186
| 22.74
| 70
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_cast.py
|
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4)},
{'shape': ()},
],
[
{'in_type': numpy.bool_},
{'in_type': numpy.uint8},
{'in_type': numpy.uint64},
{'in_type': numpy.int8},
{'in_type': numpy.int64},
{'in_type': numpy.float16},
{'in_type': numpy.float32},
{'in_type': numpy.float64},
],
[
{'out_type': numpy.bool_},
{'out_type': numpy.uint8},
{'out_type': numpy.uint64},
{'out_type': numpy.int8},
{'out_type': numpy.int64},
{'out_type': numpy.float16},
{'out_type': numpy.float32},
{'out_type': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@attr.chainerx
class TestCast(testing.FunctionTestCase):
def _skip_chainerx_unsupported_dtype(self):
supported_dtypes = chainerx.testing.dtypes.all_dtypes
if (self.in_type.__name__ not in supported_dtypes
or self.out_type.__name__ not in supported_dtypes):
raise unittest.SkipTest(
'ChainerX does not support either of {} or {} dtypes'.format(
self.in_type.__name__, self.out_type.__name__))
def setUp(self):
# Skip e.g. uint64 for ChainerX.
self._skip_chainerx_unsupported_dtype()
if (numpy.dtype(self.in_type).kind != 'f'
or numpy.dtype(self.out_type).kind != 'f'):
self.skip_backward_test = True
self.skip_double_backward_test = True
if (numpy.dtype(self.in_type).kind == 'f'
and self.out_type == numpy.float16):
self.check_forward_options.update({'atol': 1e-3, 'rtol': 1e-3})
self.check_backward_options.update({
'eps': 2.0 ** -2, 'atol': 1e-2, 'rtol': 1e-3})
self.check_double_backward_options.update({
'eps': 2.0 ** -2, 'atol': 1e-2, 'rtol': 1e-3})
def generate_inputs(self):
x = numpy.asarray(numpy.random.randn(*self.shape)).astype(self.in_type)
# The result of a cast from a negative floating-point number to
# an unsigned integer is not specified. Avoid testing that condition.
float_to_uint = (
issubclass(self.in_type, numpy.floating)
and issubclass(self.out_type, numpy.unsignedinteger))
if float_to_uint:
x[x < 0] *= -1
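            # For example, ``numpy.float32(-1).astype(numpy.uint64)`` may
            # give different values on different platforms, so negative
            # inputs are mirrored to positive ones instead of being tested.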
return x,
def forward_expected(self, inputs):
x, = inputs
return x.astype(self.out_type),
def forward(self, inputs, devices):
x, = inputs
y = functions.cast(x, self.out_type)
return y,
class TestNoCast(unittest.TestCase):
def setUp(self):
self.dtype = numpy.float32
self.x = numpy.random.uniform(-100, 100, (1,)).astype(self.dtype)
def check_forward_no_cast(self, x_data):
y = functions.cast(x_data, self.dtype)
assert isinstance(y, chainer.Variable)
assert y.data is x_data
def test_forward_no_cast_array(self):
y = functions.cast(self.x, self.dtype)
assert isinstance(y, chainer.Variable)
assert y.data is self.x
def test_forward_no_cast_variable(self):
# If backprop is disabled, it's safe to simply return the input
# variable for no-op casts.
x = chainer.Variable(self.x)
with chainer.using_config('enable_backprop', False):
y = functions.cast(x, self.dtype)
assert y is x
def test_forward_no_cast_grad(self):
        # This test would fail if F.cast did not create new function nodes
        # for no-op casts.
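        # If the two casts below returned the very same variable, y1 and y2
        # would both alias x and each gradient would accumulate both
        # branches (2 instead of 1), making the assertions below fail.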
x = chainer.Variable(self.x)
y1 = functions.cast(x, self.dtype)
y2 = functions.cast(x, self.dtype)
z = y1 + y2
gy1, gy2 = chainer.grad([z], [y1, y2], [numpy.ones_like(z.data)])
assert gy1.dtype == self.dtype
assert gy2.dtype == self.dtype
numpy.testing.assert_array_equal(gy1.data, numpy.ones_like(y1.data))
numpy.testing.assert_array_equal(gy2.data, numpy.ones_like(y2.data))
testing.run_module(__name__, __file__)
| 4,719
| 30.677852
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/functions_tests/array_tests/test_squeeze.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import functions
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ testing.product({
'use_chainerx': [True],
'chainerx_device': ['native:0', 'cuda:0', 'cuda:1'],
})
)
@testing.parameterize(*testing.product_dict(
[
{'axis': None, 'out_shape': (3,)},
{'axis': 1, 'out_shape': (1, 3, 1)},
{'axis': -3, 'out_shape': (1, 3, 1)},
{'axis': (0, 1, 3), 'out_shape': (3,)},
{'axis': (3, 1, 0), 'out_shape': (3,)},
{'axis': (-4, -3, -1), 'out_shape': (3,)},
{'axis': (-1, -3, -4), 'out_shape': (3,)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestSqueeze(testing.FunctionTestCase):
def setUp(self):
if self.dtype == numpy.float16:
self.check_forward_options.update({'atol': 5e-4, 'rtol': 5e-3})
self.check_backward_options.update({
'atol': 2 ** -4, 'rtol': 2 ** -4})
def generate_inputs(self):
x = numpy.random.uniform(-1, 1, (1, 1, 3, 1)).astype(self.dtype)
return x,
def forward_expected(self, inputs):
x, = inputs
y = numpy.squeeze(x, axis=self.axis)
return y,
def forward(self, inputs, device):
x, = inputs
return functions.squeeze(x, axis=self.axis),
@testing.parameterize(*testing.product(
{'axis': [1, (1,)]},
))
class TestSqueezeValueError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 3, 1)).astype('f')
def check_invalid_type(self, x_data):
with self.assertRaises(ValueError):
functions.squeeze(x_data, axis=self.axis)
def test_invalid_type_cpu(self):
self.check_invalid_type(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_invalid_type(cuda.to_gpu(self.x))
@testing.parameterize(*testing.product(
{'axis': [3, -4, (3,), (-4,)]},
))
class TestSqueezeInvalidType(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 3, 1)).astype('f')
def check_invalid_type(self, x_data):
with self.assertRaises(type_check.InvalidType):
functions.squeeze(x_data, axis=self.axis)
def test_invalid_type_cpu(self):
self.check_invalid_type(self.x)
@attr.gpu
def test_type_error_gpu(self):
self.check_invalid_type(cuda.to_gpu(self.x))
class TestSqueezeTypeError(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 3, 1)).astype('f')
def test_invalid_axis(self):
with self.assertRaises(TypeError):
functions.squeeze(self.x, axis='a')
testing.run_module(__name__, __file__)
| 3,111
| 25.151261
| 75
|
py
|
chainer
|
chainer-master/tests/chainer_tests/serializers_tests/test_npz.py
|
import os
import tempfile
import unittest
import mock
import numpy
import pytest
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import npz
from chainer import testing
from chainer.testing import attr
import chainerx
class TestDictionarySerializer(unittest.TestCase):
def setUp(self):
self.serializer = npz.DictionarySerializer({})
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def test_get_item(self):
child = self.serializer['x']
self.assertIsInstance(child, npz.DictionarySerializer)
self.assertEqual(child.path, 'x/')
def test_get_item_strip_slashes(self):
child = self.serializer['/x/']
self.assertEqual(child.path, 'x/')
def check_serialize(self, data, query):
ret = self.serializer(query, data)
dset = self.serializer.target['w']
self.assertIsInstance(dset, numpy.ndarray)
self.assertEqual(dset.shape, data.shape)
self.assertEqual(dset.size, data.size)
self.assertEqual(dset.dtype, data.dtype)
numpy.testing.assert_array_equal(dset, backend.CpuDevice().send(data))
self.assertIs(ret, data)
@attr.chainerx
def test_serialize_chainerx(self):
self.check_serialize(chainerx.asarray(self.data), 'w')
def test_serialize_cpu(self):
self.check_serialize(self.data, 'w')
@attr.gpu
def test_serialize_gpu(self):
self.check_serialize(cuda.to_gpu(self.data), 'w')
def test_serialize_cpu_strip_slashes(self):
self.check_serialize(self.data, '/w')
@attr.gpu
def test_serialize_gpu_strip_slashes(self):
self.check_serialize(cuda.to_gpu(self.data), '/w')
def test_serialize_scalar(self):
ret = self.serializer('x', 10)
dset = self.serializer.target['x']
self.assertIsInstance(dset, numpy.ndarray)
self.assertEqual(dset.shape, ())
self.assertEqual(dset.size, 1)
self.assertEqual(dset.dtype, int)
self.assertEqual(dset[()], 10)
self.assertIs(ret, 10)
def test_serialize_none(self):
ret = self.serializer('x', None)
dset = self.serializer.target['x']
self.assertIsInstance(dset, numpy.ndarray)
self.assertEqual(dset.shape, ())
self.assertEqual(dset.dtype, numpy.object)
self.assertIs(dset[()], None)
self.assertIs(ret, None)
@testing.parameterize(*testing.product({'compress': [False, True]}))
class TestNpzDeserializer(unittest.TestCase):
def setUp(self):
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with open(path, 'wb') as f:
savez = numpy.savez_compressed if self.compress else numpy.savez
savez(
f, **{'x/': None, 'y': self.data, 'z': numpy.asarray(10),
'zf32': numpy.array(-2**60, dtype=numpy.float32),
'zi64': numpy.array(-2**60, dtype=numpy.int64),
'w': None})
try:
self.npzfile = numpy.load(path, allow_pickle=True)
except TypeError:
self.npzfile = numpy.load(path)
self.deserializer = npz.NpzDeserializer(self.npzfile)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.deserializer['x']
self.assertIsInstance(child, npz.NpzDeserializer)
self.assertEqual(child.path[-2:], 'x/')
def test_get_item_strip_slashes(self):
child = self.deserializer['/x/']
self.assertEqual(child.path, 'x/')
def check_deserialize(self, y, query):
ret = self.deserializer(query, y)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(y), self.data)
self.assertIs(ret, y)
def check_deserialize_by_passing_none(self, y, query):
ret = self.deserializer(query, None)
numpy.testing.assert_array_equal(
backend.CpuDevice().send(ret), self.data)
@attr.chainerx
def test_deserialize_chainerx(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y), 'y')
@attr.chainerx
@attr.gpu
def test_deserialize_chainerx_non_native(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y, device='cuda:0'), 'y')
def test_deserialize_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y, 'y')
def test_deserialize_by_passing_none_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_by_passing_none(y, 'y')
@attr.gpu
def test_deserialize_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y), 'y')
@attr.ideep
def test_deserialize_ideep(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(intel64.mdarray(y), 'y')
@attr.gpu
def test_deserialize_by_passing_none_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_by_passing_none(cuda.to_gpu(y), 'y')
def test_deserialize_cpu_strip_slashes(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y, '/y')
@attr.gpu
def test_deserialize_gpu_strip_slashes(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y), '/y')
def test_deserialize_different_dtype_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float16)
ret = self.deserializer('y', y)
numpy.testing.assert_array_equal(y, self.data.astype(numpy.float16))
self.assertIs(ret, y)
@attr.gpu
def test_deserialize_different_dtype_gpu(self):
y = cuda.cupy.empty((2, 3), dtype=numpy.float16)
ret = self.deserializer('y', y)
numpy.testing.assert_array_equal(
y.get(), self.data.astype(numpy.float16))
self.assertIs(ret, y)
def test_deserialize_scalar(self):
z = 5
ret = self.deserializer('z', z)
self.assertEqual(ret, 10)
def test_deserialize_int64_to_int(self):
z = int(5)
ret = self.deserializer('zi64', z)
assert ret == -2**60
def test_deserialize_int64_to_uint32(self):
z = numpy.uint32(5)
with pytest.raises(TypeError):
self.deserializer('zi64', z)
def test_deserialize_float32_to_int(self):
z = int(5)
with pytest.raises(TypeError):
self.deserializer('zf32', z)
def test_deserialize_none(self):
ret = self.deserializer('w', None)
self.assertIs(ret, None)
def test_deserialize_by_passing_array(self):
y = numpy.empty((1,), dtype=numpy.float32)
ret = self.deserializer('w', y)
self.assertIs(ret, None)
class TestNpzDeserializerNonStrict(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with open(path, 'wb') as f:
numpy.savez(
f, **{'x': numpy.asarray(10)})
self.npzfile = numpy.load(path)
self.deserializer = npz.NpzDeserializer(self.npzfile, strict=False)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_deserialize_partial(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
ret = self.deserializer('y', y)
self.assertIs(ret, y)
@testing.parameterize(
{'ignore_names': 'yy'},
{'ignore_names': ['yy']},
{'ignore_names': lambda key: key == 'yy'},
{'ignore_names': [lambda key: key == 'yy']},
)
class TestNpzDeserializerIgnoreNames(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with open(path, 'wb') as f:
numpy.savez(
f, **{'x': numpy.asarray(10), 'yy': numpy.empty((2, 3))})
self.npzfile = numpy.load(path)
self.deserializer = npz.NpzDeserializer(
self.npzfile, ignore_names=self.ignore_names)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_deserialize_ignore_names(self):
yy = numpy.ones((2, 1), dtype=numpy.float32)
ret = self.deserializer('yy', yy)
self.assertIs(ret, yy)
@testing.parameterize(
{'ignore_names': 'yy'},
{'ignore_names': ['yy']},
{'ignore_names': lambda key: key == 'yy'},
{'ignore_names': [lambda key: key == 'yy']},
)
class TestLoadNpzIgnoreNames(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
self.x = numpy.asarray(10, dtype=numpy.float32)
self.yy = numpy.ones((2, 3), dtype=numpy.float32)
with open(path, 'wb') as f:
numpy.savez(
f, **{'x': self.x, 'yy': self.yy})
def tearDown(self):
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_load_npz_ignore_names(self):
chain = link.Chain()
with chain.init_scope():
chain.x = chainer.variable.Parameter(shape=())
chain.yy = chainer.variable.Parameter(shape=(2, 3))
npz.load_npz(
self.temp_file_path, chain, ignore_names=self.ignore_names)
self.assertEqual(chain.x.data, self.x)
self.assertFalse(numpy.all(chain.yy.data == self.yy))
@testing.parameterize(*testing.product({'file_type': ['filename', 'bytesio']}))
class TestNpzDeserializerNonStrictGroupHierarchy(unittest.TestCase):
def setUp(self):
if self.file_type == 'filename':
fd, path = tempfile.mkstemp()
os.close(fd)
self.file = path
elif self.file_type == 'bytesio':
self.file = six.BytesIO()
else:
assert False
# Create and save a link
child = link.Chain()
with child.init_scope():
child.linear = links.Linear(2, 3)
parent = link.Chain()
with parent.init_scope():
parent.linear = links.Linear(3, 2)
parent.child = child
npz.save_npz(self.file, parent)
self.source = parent
if self.file_type == 'bytesio':
self.file.seek(0)
self.npzfile = numpy.load(self.file)
self.deserializer = npz.NpzDeserializer(self.npzfile, strict=False)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if self.file_type == 'filename':
os.remove(self.file)
def test_deserialize_hierarchy(self):
# Load a link
child = link.Chain()
with child.init_scope():
child.linear2 = links.Linear(2, 3)
target = link.Chain()
with target.init_scope():
target.linear = links.Linear(3, 2)
target.child = child
target_child_W = numpy.copy(child.linear2.W.data)
target_child_b = numpy.copy(child.linear2.b.data)
self.deserializer.load(target)
# Check
numpy.testing.assert_array_equal(
self.source.linear.W.data, target.linear.W.data)
numpy.testing.assert_array_equal(
self.source.linear.b.data, target.linear.b.data)
numpy.testing.assert_array_equal(
target.child.linear2.W.data, target_child_W)
numpy.testing.assert_array_equal(
target.child.linear2.b.data, target_child_b)
class TestSerialize(unittest.TestCase):
def test_serialize(self):
obj = mock.MagicMock()
target = npz.serialize(obj)
assert obj.serialize.call_count == 1
(serializer,), _ = obj.serialize.call_args
assert isinstance(serializer, npz.DictionarySerializer)
assert isinstance(target, dict)
@testing.parameterize(
{'ignore_names': ['linear/W', 'child/linear2/b']},
{'ignore_names': lambda key: key in ['linear/W', 'child/linear2/b']},
{'ignore_names': [
lambda key: key in ['linear/W'],
lambda key: key in ['child/linear2/b']]},
{'ignore_names': [
lambda key: key in ['linear/W'],
'child/linear2/b']},
)
class TestNpzDeserializerIgnoreNamesGroupHierarchy(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
child = link.Chain()
with child.init_scope():
child.linear2 = links.Linear(2, 3)
parent = link.Chain()
with parent.init_scope():
parent.linear = links.Linear(3, 2)
parent.child = child
npz.save_npz(self.temp_file_path, parent)
self.source = parent
self.npzfile = numpy.load(path)
self.deserializer = npz.NpzDeserializer(
self.npzfile, ignore_names=self.ignore_names)
def tearDown(self):
if hasattr(self, 'npzfile'):
self.npzfile.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_deserialize_ignore_names(self):
child = link.Chain()
with child.init_scope():
child.linear2 = links.Linear(2, 3)
target = link.Chain()
with target.init_scope():
target.linear = links.Linear(3, 2)
target.child = child
target_W = numpy.copy(target.linear.W.data)
target_child_b = numpy.copy(child.linear2.b.data)
self.deserializer.load(target)
numpy.testing.assert_array_equal(
self.source.linear.b.data, target.linear.b.data)
numpy.testing.assert_array_equal(
self.source.child.linear2.W.data, target.child.linear2.W.data)
numpy.testing.assert_array_equal(
target.linear.W.data, target_W)
numpy.testing.assert_array_equal(
target.child.linear2.b.data, target_child_b)
@testing.parameterize(*testing.product({
'compress': [False, True],
'file_type': ['filename', 'bytesio'],
}))
class TestSaveNpz(unittest.TestCase):
def setUp(self):
if self.file_type == 'filename':
fd, path = tempfile.mkstemp()
os.close(fd)
self.file = path
elif self.file_type == 'bytesio':
self.file = six.BytesIO()
else:
assert False
def tearDown(self):
if self.file_type == 'filename':
os.remove(self.file)
def test_save(self):
obj = mock.MagicMock()
npz.save_npz(self.file, obj, self.compress)
self.assertEqual(obj.serialize.call_count, 1)
(serializer,), _ = obj.serialize.call_args
self.assertIsInstance(serializer, npz.DictionarySerializer)
@testing.parameterize(*testing.product({
'compress': [False, True],
'file_type': ['filename', 'bytesio'],
}))
class TestLoadNpz(unittest.TestCase):
def setUp(self):
if self.file_type == 'filename':
fd, path = tempfile.mkstemp()
os.close(fd)
self.file = path
elif self.file_type == 'bytesio':
self.file = six.BytesIO()
else:
assert False
child = link.Chain()
with child.init_scope():
child.child_linear = links.Linear(2, 3)
parent = link.Chain()
with parent.init_scope():
parent.parent_linear = links.Linear(3, 2)
parent.child = child
npz.save_npz(self.file, parent, self.compress)
if self.file_type == 'bytesio':
self.file.seek(0)
self.source_child = child
self.source_parent = parent
def tearDown(self):
if self.file_type == 'filename':
os.remove(self.file)
def test_load_with_strict(self):
obj = mock.MagicMock()
npz.load_npz(self.file, obj)
self.assertEqual(obj.serialize.call_count, 1)
(serializer,), _ = obj.serialize.call_args
self.assertIsInstance(serializer, npz.NpzDeserializer)
self.assertTrue(serializer.strict)
def test_load_without_strict(self):
obj = mock.MagicMock()
npz.load_npz(self.file, obj, strict=False)
self.assertEqual(obj.serialize.call_count, 1)
(serializer,), _ = obj.serialize.call_args
self.assertFalse(serializer.strict)
self.assertIsInstance(serializer, npz.NpzDeserializer)
def test_load_with_path(self):
target = link.Chain()
with target.init_scope():
target.child_linear = links.Linear(2, 3)
npz.load_npz(self.file, target, 'child/')
numpy.testing.assert_array_equal(
self.source_child.child_linear.W.data, target.child_linear.W.data)
def test_load_without_path(self):
target = link.Chain()
with target.init_scope():
target.parent_linear = links.Linear(3, 2)
npz.load_npz(self.file, target, path='')
numpy.testing.assert_array_equal(
self.source_parent.parent_linear.W.data,
target.parent_linear.W.data)
@testing.parameterize(*testing.product({
'compress': [False, True],
'file_type': ['filename', 'bytesio'],
}))
class TestGroupHierarchy(unittest.TestCase):
def setUp(self):
if self.file_type == 'filename':
fd, path = tempfile.mkstemp()
os.close(fd)
self.file = path
elif self.file_type == 'bytesio':
self.file = six.BytesIO()
else:
assert False
child = link.Chain()
with child.init_scope():
child.linear = links.Linear(2, 3)
child.Wc = chainer.Parameter(shape=(2, 3))
self.parent = link.Chain()
with self.parent.init_scope():
self.parent.child = child
self.parent.Wp = chainer.Parameter(shape=(2, 3))
self.optimizer = optimizers.AdaDelta()
self.optimizer.setup(self.parent)
self.parent.cleargrads()
self.optimizer.update() # init all states
self.savez = numpy.savez_compressed if self.compress else numpy.savez
def tearDown(self):
if self.file_type == 'filename':
os.remove(self.file)
def _save(self, target, obj, name):
serializer = npz.DictionarySerializer(target, name)
serializer.save(obj)
def _savez(self, file, d):
if self.file_type == 'filename':
f = open(self.file, 'wb')
elif self.file_type == 'bytesio':
f = self.file
else:
assert False
self.savez(f, **d)
if self.file_type == 'bytesio':
self.file.seek(0)
def _save_npz(self, file, obj, compress):
npz.save_npz(file, obj, compress)
if self.file_type == 'bytesio':
self.file.seek(0)
def _check_chain_group(self, npzfile, state, prefix=''):
keys = ('child/linear/W',
'child/linear/b',
'child/Wc') + state
self.assertSetEqual(set(npzfile.keys()), {prefix + x for x in keys})
def _check_optimizer_group(self, npzfile, state, prefix=''):
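        # AdaDelta keeps two accumulators per parameter, ``msg`` (mean
        # squared gradient) and ``msdx`` (mean squared update), plus the
        # per-parameter step counter ``t``, as listed below.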
keys = ('child/linear/W/t',
'child/linear/W/msg',
'child/linear/W/msdx',
'child/linear/b/t',
'child/linear/b/msg',
'child/linear/b/msdx',
'child/Wc/t',
'child/Wc/msg',
'child/Wc/msdx') + state
self.assertEqual(set(npzfile.keys()),
{prefix + x for x in keys})
def test_save_chain(self):
d = {}
self._save(d, self.parent, 'test/')
self._savez(self.file, d)
with numpy.load(self.file) as f:
self._check_chain_group(f, ('Wp',), 'test/')
def test_save_optimizer(self):
d = {}
self._save(d, self.optimizer, 'test/')
self._savez(self.file, d)
with numpy.load(self.file) as npzfile:
self._check_optimizer_group(
npzfile, ('Wp/t', 'Wp/msg', 'Wp/msdx', 'epoch', 't'), 'test/')
def test_save_chain2(self):
self._save_npz(self.file, self.parent, self.compress)
with numpy.load(self.file) as npzfile:
self._check_chain_group(npzfile, ('Wp',))
def test_save_optimizer2(self):
self._save_npz(self.file, self.optimizer, self.compress)
with numpy.load(self.file) as npzfile:
self._check_optimizer_group(
npzfile, ('Wp/t', 'Wp/msg', 'Wp/msdx', 'epoch', 't'))
def test_load_optimizer_with_strict(self):
for param in self.parent.params():
param.data.fill(1)
self._save_npz(self.file, self.parent, self.compress)
for param in self.parent.params():
param.data.fill(0)
npz.load_npz(self.file, self.parent)
for param in self.parent.params():
self.assertTrue((param.data == 1).all())
def test_load_optimizer_without_strict(self):
for param in self.parent.params():
param.data.fill(1)
self._save_npz(self.file, self.parent, self.compress)
# Remove a param
del self.parent.child.linear.b
for param in self.parent.params():
param.data.fill(0)
npz.load_npz(self.file, self.parent, strict=False)
for param in self.parent.params():
self.assertTrue((param.data == 1).all())
self.assertFalse(hasattr(self.parent.child.linear, 'b'))
testing.run_module(__name__, __file__)
| 22,252
| 31.486131
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/serializers_tests/test_hdf5.py
|
import os
import sys
import tempfile
import unittest
import mock
import numpy
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import link
from chainer import links
from chainer import optimizers
from chainer.serializers import hdf5
from chainer import testing
from chainer.testing import attr
import chainerx
if hdf5._available:
import h5py
# The tests call `fd, path = tempfile.mkstemp(); os.close(fd)` rather than
# `with tempfile.TemporaryFile() as f:` because support for file-like objects
# in `h5py.File` was only added in h5py>=2.9 (h5py/h5py#1061), while
# h5py>=2.5 must still be supported.
#
# `os.remove(path)` is necessary. The tests could use
# `tempfile.NamedTemporaryFile`, but cannot rely on its with-blocks because
# opening `f.name` before `f.file` is closed is platform-dependent behavior.
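# A minimal sketch of that pattern (illustration only; this helper is not
# used by the tests and assumes h5py is importable at call time):
def _example_temp_hdf5_roundtrip(data):
    # Create a named temporary file, close the low-level descriptor, and let
    # h5py open it by path; the file is removed unconditionally afterwards.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        with h5py.File(path, 'w') as f:
            f.create_dataset('data', data=data)
        with h5py.File(path, 'r') as f:
            return f['data'][...]
    finally:
        os.remove(path)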
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Serializer(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
self.hdf5file = h5py.File(path, 'w')
self.serializer = hdf5.HDF5Serializer(self.hdf5file, compression=3)
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
def tearDown(self):
if hasattr(self, 'hdf5file'):
self.hdf5file.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.serializer['x']
self.assertIsInstance(child, hdf5.HDF5Serializer)
self.assertEqual(child.group.name, '/x')
self.assertEqual(child.compression, 3)
def check_serialize(self, data):
ret = self.serializer('w', data)
dset = self.hdf5file['w']
self.assertIsInstance(dset, h5py.Dataset)
self.assertEqual(dset.shape, data.shape)
self.assertEqual(dset.size, data.size)
self.assertEqual(dset.dtype, data.dtype)
read = numpy.empty((2, 3), dtype=numpy.float32)
dset.read_direct(read)
numpy.testing.assert_array_equal(read, backend.CpuDevice().send(data))
self.assertEqual(dset.compression_opts, 3)
self.assertIs(ret, data)
@attr.chainerx
def test_serialize_chainerx(self):
self.check_serialize(chainerx.asarray(self.data))
def test_serialize_cpu(self):
self.check_serialize(self.data)
@attr.gpu
def test_serialize_gpu(self):
self.check_serialize(cuda.to_gpu(self.data))
def test_serialize_scalar(self):
ret = self.serializer('x', 10)
dset = self.hdf5file['x']
self.assertIsInstance(dset, h5py.Dataset)
self.assertEqual(dset.shape, ())
self.assertEqual(dset.size, 1)
self.assertEqual(dset.dtype, int)
read = numpy.empty((), dtype=numpy.int32)
dset.read_direct(read)
self.assertEqual(read, 10)
self.assertEqual(dset.compression_opts, None)
self.assertIs(ret, 10)
@unittest.skipUnless(hdf5._available and
h5py.version.version_tuple >= (2, 7, 0),
'h5py>=2.7.0 is not available')
def test_serialize_none(self):
ret = self.serializer('x', None)
self.assertIs(ret, None)
dset = self.hdf5file['x']
self.assertIsInstance(dset, h5py.Dataset)
self.assertIs(dset.shape, None)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5Deserializer(unittest.TestCase):
def setUp(self):
self.data = numpy.random.uniform(-1, 1, (2, 3)).astype(numpy.float32)
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with h5py.File(path, 'w') as f:
f.require_group('x')
f.create_dataset('y', data=self.data)
f.create_dataset('z', data=numpy.asarray(10))
# h5py.Empty is introduced from 2.7.0
if h5py.version.version_tuple >= (2, 7, 0):
f.create_dataset('w', data=h5py.Empty('f'))
self.hdf5file = h5py.File(path, 'r')
self.deserializer = hdf5.HDF5Deserializer(self.hdf5file)
def tearDown(self):
if hasattr(self, 'hdf5file'):
self.hdf5file.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_get_item(self):
child = self.deserializer['x']
self.assertIsInstance(child, hdf5.HDF5Deserializer)
self.assertEqual(child.group.name, '/x')
def check_deserialize(self, y):
ret = self.deserializer('y', y)
numpy.testing.assert_array_equal(backend.CpuDevice().send(y),
self.data)
self.assertIs(ret, y)
def check_deserialize_none_value(self, y):
ret = self.deserializer('y', None)
numpy.testing.assert_array_equal(backend.CpuDevice().send(ret),
self.data)
@attr.chainerx
def test_deserialize_chainerx(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y))
@attr.chainerx
@attr.gpu
def test_deserialize_chainerx_non_native(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(chainerx.asarray(y, device='cuda:0'))
def test_deserialize_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(y)
def test_deserialize_none_value_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_none_value(y)
@attr.gpu
def test_deserialize_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(cuda.to_gpu(y))
@attr.ideep
def test_deserialize_ideep(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize(intel64.mdarray(y))
@attr.gpu
def test_deserialize_none_value_gpu(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
self.check_deserialize_none_value(cuda.to_gpu(y))
def test_deserialize_different_dtype_cpu(self):
y = numpy.empty((2, 3), dtype=numpy.float16)
ret = self.deserializer('y', y)
self.assertIs(ret, y)
# Compare the value with the original array. Note that it's not always
# bit-identical to the result of numpy.ndarray.astype.
numpy.testing.assert_allclose(
y, self.data.astype(numpy.float16),
rtol=1e-3, atol=1e-3)
# It should be bit-identical to the result directly retrieved from
# h5py.
arr_hdf5 = numpy.empty((2, 3), dtype=numpy.float16)
fd, path = tempfile.mkstemp()
os.close(fd)
try:
with h5py.File(path, 'w') as f:
f.create_dataset('a', data=self.data)
f['a'].read_direct(arr_hdf5)
finally:
os.remove(path)
numpy.testing.assert_array_equal(y, arr_hdf5)
@attr.gpu
def test_deserialize_different_dtype_gpu(self):
y = cuda.cupy.empty((2, 3), dtype=numpy.float16)
ret = self.deserializer('y', y)
numpy.testing.assert_array_equal(
y.get(), self.data.astype(numpy.float16))
self.assertIs(ret, y)
def test_deserialize_scalar(self):
z = 5
ret = self.deserializer('z', z)
self.assertEqual(ret, 10)
def test_string(self):
fd, path = tempfile.mkstemp()
os.close(fd)
try:
data = 'abc'
with h5py.File(path, 'w') as f:
f.create_dataset('str', data=data)
with h5py.File(path, 'r') as f:
deserializer = hdf5.HDF5Deserializer(f)
ret = deserializer('str', '')
self.assertEqual(ret, data)
finally:
os.remove(path)
@unittest.skipUnless(hdf5._available and
h5py.version.version_tuple >= (2, 7, 0),
'h5py>=2.7.0 is not available')
def test_deserialize_none(self):
ret = self.deserializer('w', None)
self.assertIs(ret, None)
@unittest.skipUnless(hdf5._available and
h5py.version.version_tuple >= (2, 7, 0),
'h5py>=2.7.0 is not available')
def test_deserialize_none_by_passing_array(self):
y = numpy.empty((1,))
ret = self.deserializer('w', y)
self.assertIs(ret, None)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5DeserializerNonStrict(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
with h5py.File(path, 'w') as f:
f.require_group('x')
self.hdf5file = h5py.File(path, 'r')
self.deserializer = hdf5.HDF5Deserializer(self.hdf5file, strict=False)
def tearDown(self):
if hasattr(self, 'hdf5file'):
self.hdf5file.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_deserialize_partial(self):
y = numpy.empty((2, 3), dtype=numpy.float32)
ret = self.deserializer('y', y)
self.assertIs(ret, y)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestHDF5DeserializerNonStrictGroupHierarchy(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
child = link.Chain()
with child.init_scope():
child.linear = links.Linear(2, 3)
parent = link.Chain()
with parent.init_scope():
parent.linear = links.Linear(3, 2)
parent.child = child
hdf5.save_hdf5(self.temp_file_path, parent)
self.source = parent
self.hdf5file = h5py.File(path, 'r')
self.deserializer = hdf5.HDF5Deserializer(self.hdf5file, strict=False)
def tearDown(self):
if hasattr(self, 'hdf5file'):
self.hdf5file.close()
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_deserialize_hierarchy(self):
child = link.Chain()
with child.init_scope():
child.linear2 = links.Linear(2, 3)
target = link.Chain()
with target.init_scope():
target.linear = links.Linear(3, 2)
target.child = child
target_child_W = numpy.copy(child.linear2.W.data)
target_child_b = numpy.copy(child.linear2.b.data)
self.deserializer.load(target)
numpy.testing.assert_array_equal(
self.source.linear.W.data, target.linear.W.data)
numpy.testing.assert_array_equal(
self.source.linear.b.data, target.linear.b.data)
numpy.testing.assert_array_equal(
target.child.linear2.W.data, target_child_W)
numpy.testing.assert_array_equal(
target.child.linear2.b.data, target_child_b)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestSaveHDF5(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
def tearDown(self):
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_save(self):
obj = mock.MagicMock()
hdf5.save_hdf5(self.temp_file_path, obj, compression=3)
self.assertEqual(obj.serialize.call_count, 1)
(serializer,), _ = obj.serialize.call_args
self.assertIsInstance(serializer, hdf5.HDF5Serializer)
self.assertEqual(serializer.compression, 3)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestLoadHDF5(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
# Make a hdf5 file with empty data
h5py.File(path, 'w')
def tearDown(self):
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def test_load(self):
obj = mock.MagicMock()
hdf5.load_hdf5(self.temp_file_path, obj)
self.assertEqual(obj.serialize.call_count, 1)
(serializer,), _ = obj.serialize.call_args
self.assertIsInstance(serializer, hdf5.HDF5Deserializer)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestGroupHierarchy(unittest.TestCase):
def setUp(self):
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
child = link.Chain()
with child.init_scope():
child.linear = links.Linear(2, 3)
child.Wc = chainer.Parameter(shape=(2, 3))
self.parent = link.Chain()
with self.parent.init_scope():
self.parent.child = child
self.parent.Wp = chainer.Parameter(shape=(2, 3))
self.optimizer = optimizers.AdaDelta()
self.optimizer.setup(self.parent)
self.parent.cleargrads()
self.optimizer.update() # init states
def _save(self, h5, obj, name):
group = h5.create_group(name)
serializer = hdf5.HDF5Serializer(group)
serializer.save(obj)
def _load(self, h5, obj, name):
group = h5[name]
serializer = hdf5.HDF5Deserializer(group)
serializer.load(obj)
def tearDown(self):
if hasattr(self, 'temp_file_path'):
os.remove(self.temp_file_path)
def _check_group(self, h5, state):
self.assertSetEqual(set(h5.keys()),
set(('child',) + state))
self.assertSetEqual(set(h5['child'].keys()),
{'linear', 'Wc'})
self.assertSetEqual(set(h5['child']['linear'].keys()),
{'W', 'b'})
def test_save_chain(self):
with h5py.File(self.temp_file_path, 'w') as h5:
self._save(h5, self.parent, 'test')
self.assertSetEqual(set(h5.keys()), {'test'})
self._check_group(h5['test'], ('Wp',))
def test_save_optimizer(self):
with h5py.File(self.temp_file_path, 'w') as h5:
self._save(h5, self.optimizer, 'test')
self.assertSetEqual(set(h5.keys()), {'test'})
self._check_group(h5['test'], ('Wp', 'epoch', 't'))
def test_save_chain2(self):
hdf5.save_hdf5(self.temp_file_path, self.parent)
with h5py.File(self.temp_file_path, 'r') as h5:
self._check_group(h5, ('Wp',))
def test_save_optimizer2(self):
hdf5.save_hdf5(self.temp_file_path, self.optimizer)
with h5py.File(self.temp_file_path, 'r') as h5:
self._check_group(h5, ('Wp', 'epoch', 't'))
def test_load_chain(self):
with h5py.File(self.temp_file_path, 'w') as h5:
self._save(h5, self.parent, 'test')
with h5py.File(self.temp_file_path, 'r') as h5:
self._load(h5, self.parent, 'test')
def test_load_optimizer(self):
with h5py.File(self.temp_file_path, 'w') as h5:
self._save(h5, self.optimizer, 'test')
with h5py.File(self.temp_file_path, 'r') as h5:
self._load(h5, self.optimizer, 'test')
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestNoH5py(unittest.TestCase):
def setUp(self):
# Remove h5py from sys.modules to emulate situation that h5py is not
# installed.
sys.modules['h5py'] = None
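        # A value of None in sys.modules makes any subsequent `import h5py`
        # raise ImportError, so the freshly re-imported chainer.serializers
        # in test_raise sees h5py as unavailable.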
def tearDown(self):
sys.modules['h5py'] = h5py
def test_raise(self):
del sys.modules['chainer.serializers.hdf5']
del sys.modules['chainer.serializers.npz']
del sys.modules['chainer.serializers']
import chainer.serializers
self.assertFalse(chainer.serializers.hdf5._available)
with self.assertRaises(RuntimeError):
chainer.serializers.save_hdf5(None, None, None)
with self.assertRaises(RuntimeError):
chainer.serializers.load_hdf5(None, None)
with self.assertRaises(RuntimeError):
chainer.serializers.HDF5Serializer(None)
with self.assertRaises(RuntimeError):
chainer.serializers.HDF5Deserializer(None)
@unittest.skipUnless(hdf5._available, 'h5py is not available')
class TestH5pyEmptyNotAvailable(unittest.TestCase):
def setUp(self):
# Set h5py.version.version_tuple to emulate situation that h5py is
# so old that it doesn't have h5py.Empty.
self.original_version_tuple = h5py.version.version_tuple
h5py.version.version_tuple = (2, 6, 0)
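        # With the version tuple forced below (2, 7, 0), the serializer
        # cannot rely on h5py.Empty, so serializing None is expected to
        # raise RuntimeError (checked in test_raise1).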
# Prepare serializer
fd, path = tempfile.mkstemp()
os.close(fd)
self.temp_file_path = path
self.hdf5file = h5py.File(path, 'w')
self.serializer = hdf5.HDF5Serializer(self.hdf5file, compression=3)
def tearDown(self):
h5py.version.version_tuple = self.original_version_tuple
def test_raise1(self):
with self.assertRaises(RuntimeError):
self.serializer('x', None)
testing.run_module(__name__, __file__)
| 17,165
| 32.461988
| 78
|
py
|
chainer
|
chainer-master/tests/chainer_tests/serializers_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/function_hooks_tests/test_cuda_profile.py
|
import unittest
import mock
import numpy
import chainer
from chainer.backends import cuda
from chainer import function_hooks
from chainer import testing
from chainer.testing import attr
@attr.gpu
@unittest.skipUnless(
cuda.available and cuda.cupy.cuda.nvtx_enabled, 'nvtx is not installed')
class TestCUDAProfileHook(unittest.TestCase):
def setUp(self):
self.h = function_hooks.CUDAProfileHook()
self.x = numpy.random.uniform(-1, 1, (2, 3)).astype('f')
self.gy = numpy.random.uniform(-1, 1, (2, 3)).astype('f')
def test_name(self):
self.assertEqual(self.h.name, 'CUDAProfileHook')
def check_forward(self, x):
with mock.patch('cupy.cuda.nvtx.RangePush') as push, \
mock.patch('cupy.cuda.nvtx.RangePop') as pop:
with self.h:
chainer.Variable(x) + chainer.Variable(x)
push.assert_called_once_with('_ + _.forward')
pop.assert_called_once_with()
def test_forward_cpu(self):
self.check_forward(self.x)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
y = chainer.Variable(x) + chainer.Variable(x)
y.grad = gy
with mock.patch('cupy.cuda.nvtx.RangePush') as push, \
mock.patch('cupy.cuda.nvtx.RangePop') as pop:
with self.h:
y.backward()
push.assert_called_once_with('_ + _.backward')
pop.assert_called_once_with()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
class TestCUDAProfileHookNVTXUnavailable(unittest.TestCase):
def setUp(self):
self.nvtx_enabled = cuda.cupy.cuda.nvtx_enabled
cuda.cupy.cuda.nvtx_enabled = False
def tearDown(self):
cuda.cupy.cuda.nvtx_enabled = self.nvtx_enabled
def test_unavailable(self):
with self.assertRaises(RuntimeError):
function_hooks.CUDAProfileHook()
testing.run_module(__name__, __file__)
| 2,124
| 27.333333
| 76
|
py
|
chainer
|
chainer-master/tests/chainer_tests/function_hooks_tests/test_debug_print.py
|
import re
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_hooks
from chainer import testing
from chainer.testing import attr
class DummyFunction(chainer.Function):
def forward(self, inputs):
self.retain_inputs((0,))
return inputs[0],
def backward(self, inputs, grads):
return (grads[0],) + (None,) * (len(inputs) - 1)
class TestPrintHookToFunction(unittest.TestCase):
def setUp(self):
self.io = six.StringIO()
self.h = function_hooks.PrintHook(file=self.io)
self.f = DummyFunction()
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_forward_cpu(self):
self.f.add_hook(self.h)
self.f(chainer.Variable(self.x), chainer.Variable(self.x))
        # In some environments (Python 2), the shape elements are rendered
        # as longs, hence the optional `L` suffix in the pattern below.
expect = r'''^function\tDummyFunction
input data
<variable at 0x[0-9a-f]+>
- device: CPU
- backend: <(type|class) 'numpy.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None
<variable at 0x[0-9a-f]+>
- device: CPU
- backend: <(type|class) 'numpy.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None$
'''
actual = self.io.getvalue()
self.assertTrue(re.match(expect, actual), actual)
@attr.gpu
def test_forward_gpu(self):
self.f.add_hook(self.h)
self.f(chainer.Variable(cuda.to_gpu(self.x)),
chainer.Variable(cuda.to_gpu(self.x)))
expect = r'''^function\tDummyFunction
input data
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy._?core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy._?core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None$
'''
actual = self.io.getvalue()
self.assertTrue(re.match(expect, actual), actual)
def test_backward_cpu(self):
y = self.f(chainer.Variable(self.x), chainer.Variable(self.x))
y.grad = self.gy
self.f.add_hook(self.h)
y.backward()
expect = r'''^function\tDummyFunction
input data
<variable at 0x[0-9a-f]+>
- device: CPU
- backend: <(type|class) 'numpy.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None
\(removed\)
output gradient
<variable at 0x[0-9a-f]+>
- device: CPU
- backend: <(type|class) 'numpy.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: mean=[0-9.\-e]+, std=[0-9.\-e]+$
'''
actual = self.io.getvalue()
self.assertTrue(re.match(expect, actual), actual)
@attr.gpu
def test_backward_gpu(self):
y = self.f(chainer.Variable(cuda.to_gpu(self.x)),
chainer.Variable(cuda.to_gpu(self.x)))
y.grad = cuda.to_gpu(self.gy)
self.f.add_hook(self.h)
y.backward()
expect = r'''^function\tDummyFunction
input data
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy._?core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: None
\(removed\)
output gradient
<variable at 0x[0-9a-f]+>
- device: <CUDA Device 0>
- backend: <(type|class) 'cupy._?core.core.ndarray'>
- shape: \(3L?, 5L?\)
- dtype: float32
- statistics: mean=[0-9.\-e]+, std=[0-9.\-e]+
- grad: mean=[0-9.\-e]+, std=[0-9.\-e]+$
'''
actual = self.io.getvalue()
self.assertTrue(re.match(expect, actual), actual)
testing.run_module(__name__, __file__)
| 3,892
| 26.807143
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/function_hooks_tests/test_timer.py
|
import os
import time
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_hooks
from chainer import functions
from chainer.functions.math import basic_math
from chainer import testing
from chainer.testing import attr
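# time.perf_counter is only available on Python 3.3+; on older interpreters
# fall back to the most precise wall-clock timer for the platform.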
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
def check_history(self, t, function_type, return_type):
func_name = t[0]
assert func_name == function_type.__name__
assert isinstance(t[1], return_type)
class SimpleLink(chainer.Link):
def __init__(self):
super(SimpleLink, self).__init__()
with self.init_scope():
init_w = numpy.random.uniform(-1, 1, (3, 5)).astype(
numpy.float32)
self.w = chainer.Parameter(init_w)
def forward(self, x):
return self.w * x
class TestTimerHookToLink(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.layer = SimpleLink()
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_name(self):
assert self.h.name == 'TimerHook'
def check_forward(self, x):
with self.h:
self.layer(chainer.Variable(x))
assert len(self.h.call_history) == 1
check_history(self, self.h.call_history[0], basic_math.Mul, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.layer.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.layer(x)
y.grad = gy
with self.h:
y.backward()
# It includes forward of + that accumulates gradients to W and b
assert len(self.h.call_history) == 3
for entry in self.h.call_history:
if entry[0] == 'Add':
continue
check_history(self, entry, basic_math.Mul, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.layer.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestTimerHookToFunction(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def check_forward(self, x):
self.f.apply((chainer.Variable(x),))
assert len(self.h.call_history) == 1
check_history(self, self.h.call_history[0],
functions.math.exponential.Exp, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.f.apply((x,))[0]
y.grad = gy
y.backward()
assert len(self.h.call_history) == 2
check_history(self, self.h.call_history[1],
functions.math.exponential.Exp, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def test_reentrant(self):
# In/grad data are random; these do not simulate the actually possible
# cases.
# any function other than Exp is ok
g = functions.math.identity.Identity()
self.h.backward_preprocess(self.f, (self.x,), (self.gy,))
t1 = _get_time()
time.sleep(0.001) # longer than each hook call
self.h.forward_preprocess(g, (self.x,))
self.h.forward_postprocess(g, (self.x,))
t2 = _get_time()
self.h.backward_postprocess(self.f, (self.x,), (self.gy,))
history = dict(self.h.call_history)
assert len(history) == 2
assert self.f._impl_name in history
assert g._impl_name in history
f_time = history[self.f._impl_name]
g_time = history[g._impl_name]
assert g_time <= t2 - t1
assert f_time >= t2 - t1
def test_reentrant_total_time(self):
g = functions.math.identity.Identity()
t0 = _get_time()
self.h.backward_preprocess(self.f, (self.x,), (self.gy,))
t1 = _get_time()
self.h.forward_preprocess(g, (self.x,))
time.sleep(0.001)
self.h.forward_postprocess(g, (self.x,))
t2 = _get_time()
self.h.backward_postprocess(self.f, (self.x,), (self.gy,))
t3 = _get_time()
assert self.h.total_time() <= t3 - t0
assert self.h.total_time() >= t2 - t1
@testing.parameterize(
{'unit': 'sec'},
{'unit': 'ms'},
{'unit': 'us'},
{'unit': 'ns'},
{'unit': 'auto'},
{'unit': 'auto_foreach'},
)
class TestTimerPrintReport(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_summary(self):
x = self.x
self.f.apply((chainer.Variable(x),))
self.f.apply((chainer.Variable(x),))
assert len(self.h.call_history) == 2
assert len(self.h.summary()) == 1
def test_print_report(self):
x = self.x
self.f.apply((chainer.Variable(x),))
self.f.apply((chainer.Variable(x),))
io = six.StringIO()
self.h.print_report(unit=self.unit, file=io)
expect = r'''\AFunctionName +ElapsedTime +Occurrence
+Exp +[0-9.\-e]+(.s|sec) +[0-9]+
\Z'''
actual = io.getvalue()
six.assertRegex(self, actual, expect)
testing.run_module(__name__, __file__)
| 6,276
| 29.177885
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/function_hooks_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/function_hooks_tests/test_cupy_memory_profile.py
|
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_hooks
from chainer import functions
from chainer.functions.math import basic_math
from chainer import testing
from chainer.testing import attr
def check_history(self, t, function_type, used_bytes_type,
acquired_bytes_type):
func_name = t[0]
assert func_name == function_type.__name__
self.assertIsInstance(t[1], used_bytes_type)
self.assertIsInstance(t[2], acquired_bytes_type)
class SimpleLink(chainer.Link):
def __init__(self):
super(SimpleLink, self).__init__()
with self.init_scope():
init_w = numpy.random.uniform(-1, 1, (3, 5)).astype(
numpy.float32)
self.w = chainer.Parameter(init_w)
def forward(self, x):
return self.w * x
@attr.gpu
class TestCupyMemoryProfileHookToLink(unittest.TestCase):
def setUp(self):
self.h = function_hooks.CupyMemoryProfileHook()
self.l = SimpleLink()
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_name(self):
self.assertEqual(self.h.name, 'CupyMemoryProfileHook')
def check_forward(self, x):
with self.h:
self.l(chainer.Variable(x))
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0],
basic_math.Mul, int, int)
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.l(x)
y.grad = gy
with self.h:
y.backward()
# It includes forward of + that accumulates gradients to W and b
self.assertEqual(3, len(self.h.call_history))
for entry in self.h.call_history:
if entry[0] == 'Add':
continue
check_history(self, entry,
basic_math.Mul, int, int)
def test_backward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
class TestCupyMemoryProfileHookToFunction(unittest.TestCase):
def setUp(self):
self.h = function_hooks.CupyMemoryProfileHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def tearDown(self):
self.f.delete_hook(self.h.name)
def check_forward(self, x):
self.f.apply((chainer.Variable(x),))
self.assertEqual(1, len(self.h.call_history))
check_history(self, self.h.call_history[0],
functions.math.exponential.Exp, int, int)
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.f.apply((x,))[0]
y.grad = gy
y.backward()
self.assertEqual(2, len(self.h.call_history))
check_history(self, self.h.call_history[1],
functions.math.exponential.Exp, int, int)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def test_reentrant(self):
# In/grad data are random; these do not simulate the actually possible
# cases.
f = self.f
# any function other than f: Exp is ok
g = functions.math.identity.Identity()
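        # The nested pre/post calls below simulate a reentrant case in which
        # f's backward encloses g's forward. The memory-hook counters are
        # bumped by hand to fake allocations: the first 512 B while g is
        # active, the second 512 B after g but still inside f, so f is
        # expected to account for more used/acquired bytes than g.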
self.h.backward_preprocess(f, (self.x,), (self.gy,))
self.h.forward_preprocess(g, (self.x,))
self.h._memory_hook.used_bytes += 512
self.h._memory_hook.acquired_bytes += 512
self.h.forward_postprocess(g, (self.x,))
self.h._memory_hook.used_bytes += 512
self.h._memory_hook.acquired_bytes += 512
self.h.backward_postprocess(f, (self.x,), (self.gy,))
history = {f: (u, a, d) for (f, u, a, d) in self.h.call_history}
self.assertEqual(len(history), 2)
self.assertIn(f._impl_name, history)
self.assertIn(g._impl_name, history)
f_used_bytes, f_acquired_bytes, f_depth = history[f._impl_name]
g_used_bytes, g_acquired_bytes, g_depth = history[g._impl_name]
self.assertEqual(f_depth, 0)
self.assertEqual(g_depth, 1)
self.assertGreater(f_used_bytes, g_used_bytes)
self.assertGreater(f_acquired_bytes, g_acquired_bytes)
def test_reentrant_total_bytes(self):
f = self.f
g = functions.math.identity.Identity()
self.h.backward_preprocess(f, (self.x,), (self.gy,))
self.h.forward_preprocess(g, (self.x,))
self.h._memory_hook.used_bytes += 512
self.h._memory_hook.acquired_bytes += 512
self.h.forward_postprocess(g, (self.x,))
self.h._memory_hook.used_bytes += 512
self.h._memory_hook.acquired_bytes += 512
self.h.backward_postprocess(f, (self.x,), (self.gy,))
self.assertEqual(self.h.total_used_bytes(), 1024)
self.assertEqual(self.h.total_acquired_bytes(), 1024)
@attr.gpu
class TestCupyMemoryProfileReportBase(unittest.TestCase):
def setUp(self):
cuda.memory_pool.free_all_blocks()
self.h = function_hooks.CupyMemoryProfileHook()
f1 = functions.exp
f2 = functions.relu
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
x = cuda.to_gpu(self.x)
with self.h:
f1(chainer.Variable(x))
f1(chainer.Variable(x))
f2(chainer.Variable(x))
f2(chainer.Variable(x))
class TestCupyMemoryProfilerStatistics(TestCupyMemoryProfileReportBase):
def test_call_history(self):
self.assertEqual(4, len(self.h.call_history))
def test_total_used_bytes(self):
self.assertNotEqual(0, self.h.total_used_bytes())
def test_total_acquired_bytes(self):
self.assertNotEqual(0, self.h.total_acquired_bytes())
def test_summary(self):
self.assertEqual(2, len(self.h.summary()))
@testing.parameterize(
{'unit': 'B'},
{'unit': 'KB'},
{'unit': 'MB'},
{'unit': 'GB'},
{'unit': 'TB'},
{'unit': 'PB'},
{'unit': 'EB'},
{'unit': 'ZB'},
{'unit': 'auto'},
{'unit': 'auto_foreach'},
)
@attr.gpu
class TestCupyMemoryProfileReportPrintUnit(TestCupyMemoryProfileReportBase):
def test_print_report(self):
io = six.StringIO()
self.h.print_report(unit=self.unit, file=io)
expect = r'''\AFunctionName UsedBytes AcquiredBytes Occurrence
+Exp +[0-9.\-e]+.?B +[0-9.\-e]+.?B +[0-9]+
+ReLU +[0-9.\-e]+.?B +[0-9.\-e]+.?B +[0-9]+$
'''
actual = io.getvalue()
six.assertRegex(self, actual, expect)
@testing.parameterize(
{'unit': 'B', 'denomi': 1024 ** 0, 'bytes': -1},
{'unit': 'B', 'denomi': 1024 ** 0, 'bytes': 0},
{'unit': 'B', 'denomi': 1024 ** 0, 'bytes': 1},
{'unit': 'B', 'denomi': 1024 ** 0, 'bytes': 512},
{'unit': 'B', 'denomi': 1024 ** 0, 'bytes': 1023},
{'unit': 'KB', 'denomi': 1024 ** 1, 'bytes': 1024},
{'unit': 'KB', 'denomi': 1024 ** 1, 'bytes': 1024 ** 2 - 1},
{'unit': 'MB', 'denomi': 1024 ** 2, 'bytes': 1024 ** 2},
{'unit': 'GB', 'denomi': 1024 ** 3, 'bytes': 1024 ** 3},
{'unit': 'TB', 'denomi': 1024 ** 4, 'bytes': 1024 ** 4},
{'unit': 'PB', 'denomi': 1024 ** 5, 'bytes': 1024 ** 5},
{'unit': 'EB', 'denomi': 1024 ** 6, 'bytes': 1024 ** 6},
{'unit': 'ZB', 'denomi': 1024 ** 7, 'bytes': 1024 ** 7},
{'unit': 'ZB', 'denomi': 1024 ** 7, 'bytes': 1024 ** 8},
)
@attr.gpu
class TestCupyMemoryProfileReportChooseUnit(unittest.TestCase):
def test_choose_unit(self):
h = function_hooks.CupyMemoryProfileHook()
self.assertEqual((self.denomi, self.unit), h._choose_unit(self.bytes))
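# A minimal sketch of the unit-selection logic that the parameterization
# above exercises, assuming the same 1024-based units capped at 'ZB'. The
# function name is hypothetical and this is not the hook's actual
# implementation; it is included only to make the expected mapping explicit.
def _choose_unit_sketch(num_bytes):
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']
    denomi = 1
    for unit in units[:-1]:
        if abs(num_bytes) < denomi * 1024:
            return denomi, unit
        denomi *= 1024
    return denomi, units[-1]  # anything >= 1024 ** 7 is reported in ZB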
testing.run_module(__name__, __file__)
| 8,189
| 32.565574
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/graph_optimization_tests/test_static_graph_models.py
|
import unittest
import numpy
import chainer
from chainer import configuration
from chainer import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer.graph_optimizations.static_graph import static_graph
import chainer.links as L
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class StaticMLP(chainer.Chain):
def __init__(self, in_size, n_out, W_dtype, x_dtype):
super(StaticMLP, self).__init__()
with self.init_scope():
self.l1 = links.Linear(
in_size, n_out,
initialW=chainer.initializers.Normal(1, W_dtype),
initial_bias=chainer.initializers.Normal(1, x_dtype))
@static_graph(verbosity_level=2)
def __call__(self, x):
return F.relu(self.l1(x))
class DynamicMLP(chainer.Chain):
def __init__(self, in_size, n_out, W_dtype, x_dtype):
super(DynamicMLP, self).__init__()
with self.init_scope():
self.l1 = links.Linear(
in_size, n_out,
initialW=chainer.initializers.Normal(1, W_dtype),
initial_bias=chainer.initializers.Normal(1, x_dtype))
def __call__(self, x):
return F.relu(self.l1(x))
class MLP(chainer.Chain):
def __init__(self, in_size, n_out, W_dtype, x_dtype):
super(MLP, self).__init__()
with self.init_scope():
initialW = chainer.initializers.Normal(1, W_dtype)
initial_bias = chainer.initializers.Normal(1, x_dtype)
self.l1 = links.Linear(in_size,
n_out,
initialW=initialW,
initial_bias=initial_bias)
self.mode = 'static'
def __call__(self, x):
if self.mode == 'static':
return self.static_call(x)
else:
return self.dynamic_call(x)
def dynamic_call(self, x):
# Dynamic graph only.
return F.relu(self.l1(x))
@static_graph(verbosity_level=2)
def static_call(self, x):
# Static graph.
return F.relu(self.l1(x))
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'W_dtype': [numpy.float32],
'use_static_graph': [True, False],
}))
class TestSimpleChain(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.in_units = 5
self.out_units = 6
x_size = (self.batch_size, self.in_units)
self.x = numpy.random.uniform(size=x_size).astype(self.x_dtype)
gy_size = (self.batch_size, self.out_units)
self.gy = numpy.random.uniform(size=gy_size).astype(self.x_dtype)
self.chain = MLP(self.in_units,
self.out_units,
self.W_dtype,
self.x_dtype)
self.chain.l1.cleargrads()
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
self.dynamic_chain = DynamicMLP(self.in_units,
self.out_units,
self.W_dtype,
self.x_dtype)
self.static_chain = StaticMLP(self.in_units,
self.out_units,
self.W_dtype,
self.x_dtype)
def check_forward(self, x):
chain = self.chain
y_dyn = chain.dynamic_call(x)
use_static_graph = self.use_static_graph
with chainer.using_config('use_static_graph', use_static_graph), \
chainer.using_config('enable_backprop', False):
y_static = chain.static_call(x)
y_static = chain.static_call(x)
y_static = chain.static_call(x)
assert use_static_graph == hasattr(chain, 'schedule_manager')
assert use_static_graph == hasattr(chain, 'static_schedule')
chainer.testing.assert_allclose(y_dyn.data, y_static.data)
def test_forward_cpu(self):
self.check_forward(self.x)
def test_forward_cpu2(self):
y_dyn = self.chain.dynamic_call(self.x)
x2 = 2*self.x
# todo: add a new config so that we can still use 'train'
with configuration.using_config('train', False):
y_static1 = self.chain.static_call(x2)
y_static1.grad = y_static1.data.copy()
y_static1.backward()
schedule_manager = self.chain.schedule_manager
print('sched 1: ', schedule_manager)
y_static = self.chain.static_call(self.x)
chainer.testing.assert_allclose(y_dyn.data, y_static.data)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.chain.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad, chain):
gradient_check.check_backward(
chain, x_data, y_grad, (chain.l1.W, chain.l1.b),
dtype='f', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
chain = self.static_chain
with configuration.using_config('train', False):
self.check_backward(self.x, self.gy, chain)
class MNISTStaticMLP(chainer.Chain):
"""This is the network from the MNIST example.
Static version.
"""
def __init__(self, n_units, n_out):
super(MNISTStaticMLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
@static_graph(verbosity_level=2)
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
class MNISTDynamicMLP(chainer.Chain):
"""This is the network from the MNIST example.
Dynamic version.
"""
def __init__(self, n_units, n_out):
super(MNISTDynamicMLP, self).__init__()
with self.init_scope():
# the size of the inputs to each layer will be inferred
self.l1 = L.Linear(None, n_units) # n_in -> n_units
self.l2 = L.Linear(None, n_units) # n_units -> n_units
self.l3 = L.Linear(None, n_out) # n_units -> n_out
def __call__(self, x):
h1 = F.relu(self.l1(x))
h2 = F.relu(self.l2(h1))
return self.l3(h2)
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float32],
'W_dtype': [numpy.float32],
}))
class TestMultiLayerChain(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.in_units = 5
self.out_units = 6
self.hidden_units = 5
x_size = (self.batch_size, self.in_units)
self.x = numpy.random.uniform(size=x_size).astype(self.x_dtype)
gy_size = (self.batch_size, self.out_units)
self.gy = numpy.random.uniform(size=gy_size).astype(self.x_dtype)
self.chain = MLP(self.in_units,
self.out_units,
self.W_dtype,
self.x_dtype)
self.chain.l1.cleargrads()
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
self.dynamic_chain = MNISTDynamicMLP(self.hidden_units, self.out_units)
self.static_chain = MNISTStaticMLP(self.hidden_units, self.out_units)
def check_network_params_are_equal(self):
static_W1_data = self.static_chain.l1.W.data
dyn_W1_data = self.dynamic_chain.l1.W.data
chainer.testing.assert_allclose(static_W1_data, dyn_W1_data)
static_W2_data = self.static_chain.l2.W.data
dyn_W2_data = self.dynamic_chain.l2.W.data
chainer.testing.assert_allclose(static_W2_data, dyn_W2_data)
static_W3_data = self.static_chain.l3.W.data
dyn_W3_data = self.dynamic_chain.l3.W.data
chainer.testing.assert_allclose(static_W3_data, dyn_W3_data)
static_b1_data = self.static_chain.l1.b.data
dyn_b1_data = self.dynamic_chain.l1.b.data
chainer.testing.assert_allclose(static_b1_data, dyn_b1_data)
static_b2_data = self.static_chain.l2.b.data
dyn_b2_data = self.dynamic_chain.l2.b.data
chainer.testing.assert_allclose(static_b2_data, dyn_b2_data)
static_b3_data = self.static_chain.l3.b.data
dyn_b3_data = self.dynamic_chain.l3.b.data
chainer.testing.assert_allclose(static_b3_data, dyn_b3_data)
static_W1_grad = self.static_chain.l1.W.grad
dyn_W1_grad = self.dynamic_chain.l1.W.grad
print('static_W1_grad: ', static_W1_grad)
print('dyn_W1_grad: ', dyn_W1_grad)
chainer.testing.assert_allclose(static_W1_grad, dyn_W1_grad)
static_W2_grad = self.static_chain.l2.W.grad
dyn_W2_grad = self.dynamic_chain.l2.W.grad
chainer.testing.assert_allclose(static_W2_grad, dyn_W2_grad)
static_W3_grad = self.static_chain.l3.W.grad
dyn_W3_grad = self.dynamic_chain.l3.W.grad
chainer.testing.assert_allclose(static_W3_grad, dyn_W3_grad)
static_b1_grad = self.static_chain.l1.b.grad
dyn_b1_grad = self.dynamic_chain.l1.b.grad
chainer.testing.assert_allclose(static_b1_grad, dyn_b1_grad)
static_b2_grad = self.static_chain.l2.b.grad
dyn_b2_grad = self.dynamic_chain.l2.b.grad
chainer.testing.assert_allclose(static_b2_grad, dyn_b2_grad)
static_b3_grad = self.static_chain.l3.b.grad
dyn_b3_grad = self.dynamic_chain.l3.b.grad
chainer.testing.assert_allclose(static_b3_grad, dyn_b3_grad)
def test_backward_custom_cpu(self):
        # Verify that both the Dynamic and Static networks produce the same
# results on forward and backward passes.
print('debug: Original input variable array: ', self.x)
x_var_dyn = chainer.Variable(self.x)
y_dyn = self.dynamic_chain(x_var_dyn)
y_dyn.grad = self.gy
y_dyn.backward()
self.dynamic_chain.cleargrads()
x_var_dyn.grad_var = None
# Do forward and backward pass on the static chain and then
# set its parameters to the same values as the dynamic chain.
x_var_static = chainer.Variable(self.x.copy())
y_static = self.static_chain(x_var_static)
y_static.grad = self.gy
y_static.backward()
self.static_chain.cleargrads()
x_var_static.grad_var = None
self.static_chain.l1.W.data = self.dynamic_chain.l1.W.data.copy()
self.static_chain.l1.b.data = self.dynamic_chain.l1.b.data.copy()
self.static_chain.l2.W.data[...] = self.dynamic_chain.l2.W.data
self.static_chain.l2.b.data[...] = self.dynamic_chain.l2.b.data
self.static_chain.l3.W.data[...] = self.dynamic_chain.l3.W.data
self.static_chain.l3.b.data[...] = self.dynamic_chain.l3.b.data
# Do forward pass and verify that the outputs match the dynamic
# chain.
# Use a different input variable for this pass.
x_size = (self.batch_size, self.in_units)
new_x_data = numpy.random.uniform(size=x_size).astype(self.x_dtype)
print('debug: 2nd iteration input variable array: ', new_x_data)
x_var_dyn = chainer.Variable(new_x_data)
x_var_static = chainer.Variable(new_x_data.copy())
y_static = self.static_chain(x_var_static)
assert y_static.data is not None
y_dyn = self.dynamic_chain(x_var_dyn)
assert y_dyn.data is not None
chainer.testing.assert_allclose(y_dyn.data, y_static.data)
# Use a different gy for the backward pass:
y_size = (self.batch_size, self.out_units)
new_y_data = numpy.random.uniform(size=y_size).astype(self.x_dtype)
print('debug: 2nd iteration gy variable array: ', new_y_data)
x_var_static.grad = None
self.static_chain.cleargrads()
y_static.grad = new_y_data
y_static.backward()
x_var_dyn.grad = None
self.dynamic_chain.cleargrads()
y_dyn.grad = new_y_data.copy()
y_dyn.backward()
assert x_var_dyn.grad is not None
assert x_var_static.grad is not None
chainer.testing.assert_allclose(x_var_dyn.grad, x_var_static.grad)
self.check_network_params_are_equal()
n_size = (self.batch_size, self.in_units)
noise1 = 0.1*numpy.random.uniform(size=n_size).astype(self.x_dtype)
x_pass1 = new_x_data + noise1
# Modify l2.W's data:
l2s = self.static_chain.l2.W.data.shape
new_l2_W_data = 0.1*numpy.random.uniform(size=l2s).astype(self.x_dtype)
self.static_chain.l2.W.data = new_l2_W_data
self.dynamic_chain.l2.W.data = new_l2_W_data
ns = (self.batch_size, self.out_units)
new_y_data = numpy.random.uniform(size=ns).astype(self.x_dtype)
x_var_static.data = x_pass1
y_static = self.static_chain(x_var_static)
assert y_static.data is not None
y_static.grad = new_y_data
self.static_chain.cleargrads()
y_static.backward()
x_var_dyn.data = x_pass1
y_dyn = self.dynamic_chain(x_var_dyn)
assert y_dyn.data is not None
y_dyn.grad = new_y_data.copy()
self.dynamic_chain.cleargrads()
y_dyn.backward()
chainer.testing.assert_allclose(y_dyn.data, y_static.data)
self.check_network_params_are_equal()
assert x_var_dyn.grad is not None
assert x_var_static.grad is not None
chainer.testing.assert_allclose(x_var_dyn.grad, x_var_static.grad)
class StaticBN(chainer.Chain):
def __init__(self, in_size, dtype, use_gamma, use_beta):
super(StaticBN, self).__init__()
with self.init_scope():
self.l1 = links.BatchNormalization(
in_size,
dtype=dtype,
use_gamma=use_gamma,
use_beta=use_beta)
@static_graph(verbosity_level=2)
def __call__(self, x):
return F.relu(self.l1(x))
class DynamicBN(chainer.Chain):
def __init__(self, in_size, dtype, use_gamma, use_beta):
super(DynamicBN, self).__init__()
with self.init_scope():
self.l1 = links.BatchNormalization(
in_size,
dtype=dtype,
use_gamma=use_gamma,
use_beta=use_beta)
def __call__(self, x):
return F.relu(self.l1(x))
class BN(chainer.Chain):
def __init__(self, in_size, dtype, use_gamma, use_beta):
super(BN, self).__init__()
with self.init_scope():
self.l1 = links.BatchNormalization(
in_size,
dtype=dtype,
use_gamma=use_gamma,
use_beta=use_beta)
self.mode = 'static'
def __call__(self, x):
if self.mode == 'static':
return self.static_call(x)
else:
return self.dynamic_call(x)
def dynamic_call(self, x):
# Dynamic graph only.
return F.relu(self.l1(x))
@static_graph(verbosity_level=2)
def static_call(self, x):
# Static graph.
return F.relu(self.l1(x))
@testing.parameterize(*testing.product({
'x_dtype': [numpy.float16, numpy.float32, numpy.float64],
'use_static_graph': [True, False],
'use_gamma': [True, False],
'use_beta': [True, False],
}))
class TestBNChain(unittest.TestCase):
def setUp(self):
self.batch_size = 4
self.in_units = 5
self.out_units = 5
x_size = (self.batch_size, self.in_units)
self.x = numpy.random.uniform(size=x_size).astype(self.x_dtype)
gy_size = (self.batch_size, self.out_units)
self.gy = numpy.random.uniform(size=gy_size).astype(self.x_dtype)
self.chain = BN(
self.in_units,
self.x_dtype,
self.use_gamma,
self.use_beta)
self.chain.l1.cleargrads()
self.check_forward_options = {}
self.check_backward_options = {'atol': 1e-2, 'rtol': 5e-2}
self.dynamic_chain = DynamicBN(
self.in_units,
self.x_dtype,
self.use_gamma,
self.use_beta)
self.static_chain = StaticBN(
self.in_units,
self.x_dtype,
self.use_gamma,
self.use_beta)
def check_forward(self, x):
chain = self.chain
y_dyn = chain.dynamic_call(x)
use_static_graph = self.use_static_graph
with chainer.using_config('use_static_graph', use_static_graph), \
chainer.using_config('enable_backprop', False):
y_static = chain.static_call(x)
y_static = chain.static_call(x)
y_static = chain.static_call(x)
assert use_static_graph == hasattr(chain, 'schedule_manager')
assert use_static_graph == hasattr(chain, 'static_schedule')
chainer.testing.assert_allclose(y_dyn.data, y_static.data)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
with testing.assert_warns(DeprecationWarning):
self.chain.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad, chain):
to_check = tuple()
if self.use_gamma:
to_check += (chain.l1.gamma,)
if self.use_beta:
to_check += (chain.l1.beta,)
gradient_check.check_backward(
chain, x_data, y_grad, to_check,
dtype='f', **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
chain = self.static_chain
with configuration.using_config('train', False):
self.check_backward(self.x, self.gy, chain)
testing.run_module(__name__, __file__)
if __name__ == '__main__':
unittest.main()
| 18,125
| 35.10757
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/graph_optimization_tests/__init__.py
| 0
| 0
| 0
|
py
|
|
chainer
|
chainer-master/tests/chainer_tests/datasets_tests/test_text_dataset.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import pickle
import unittest
import six
from chainer import datasets
from chainer import testing
class TestTextDataset(unittest.TestCase):
def setUp(self):
self.root = os.path.join(os.path.dirname(__file__), 'text_dataset')
def _dataset(self, path, **kwargs):
def _absolute(p):
return '{}{}{}'.format(self.root, os.sep, p)
if isinstance(path, six.string_types):
path = _absolute(path)
else:
path = [_absolute(p) for p in path]
return datasets.TextDataset(path, **kwargs)
def test_close(self):
ds = self._dataset('ascii_1.txt')
assert ds[0] == 'hello\n'
ds.close()
with self.assertRaises(ValueError):
ds[0]
def test_close_exception(self):
ds = self._dataset(['ascii_1.txt', 'ascii_1.txt', 'ascii_1.txt'])
assert not ds._fps[0].closed
assert not ds._fps[1].closed
assert not ds._fps[2].closed
ds._fps[1] = None
with self.assertRaises(AttributeError):
ds.close()
assert ds._fps[0].closed
assert ds._fps[2].closed
def test_len(self):
ds = self._dataset('ascii_1.txt')
assert len(ds) == 3
def test_len_noeol(self):
# No linefeed at the end of the file.
ds = self._dataset('ascii_noeol.txt', encoding=['ascii'])
assert len(ds) == 3
def test_len_unicode(self):
ds = self._dataset(['utf8_1.txt'], encoding='utf-8')
assert len(ds) == 3
def test_len_multiple(self):
ds = self._dataset(['utf8_1.txt', 'utf8_2.txt'], encoding='utf-8')
assert len(ds) == 3
def test_get(self):
ds = self._dataset(['ascii_1.txt'])
assert ds[0] == 'hello\n'
assert ds[1] == 'world\n'
assert ds[2] == 'test\n'
def test_get_unicode(self):
ds = self._dataset(['utf8_1.txt'], encoding='utf-8')
assert ds[0] == 'テスト1\n'
assert ds[1] == 'テスト2\n'
assert ds[2] == 'Test3\n'
def test_get_crlf(self):
ds = self._dataset(['utf8_crlf.txt'], encoding='utf-8')
assert ds[0] == 'テスト1\n'
assert ds[1] == 'テスト2\n'
assert ds[2] == 'Test3\n'
def test_get_multiple(self):
ds = self._dataset(['utf8_1.txt', 'utf8_2.txt'], encoding='utf-8')
assert ds[0] == ('テスト1\n', 'Test1\n')
assert ds[1] == ('テスト2\n', 'テスト2\n')
assert ds[2] == ('Test3\n', 'テスト3\n')
def test_get_blank(self):
# File with blank (empty) line.
ds = self._dataset(['ascii_blank_line.txt'], encoding='ascii')
assert ds[0] == 'hello\n'
assert ds[1] == 'world\n'
assert ds[2] == '\n'
assert ds[3] == 'test\n'
def test_encoding(self):
# UTF-8 with BOM
ds = self._dataset(['utf8sig.txt'], encoding='utf-8-sig')
assert ds[0] == 'テスト1\n'
assert ds[1] == 'Test2\n'
assert ds[2] == 'Test3\n'
def test_encoding_multiple(self):
ds = self._dataset(
['ascii_1.txt', 'utf8_1.txt'],
encoding=['ascii', 'utf-8'])
assert ds[0] == ('hello\n', 'テスト1\n')
assert ds[1] == ('world\n', 'テスト2\n')
assert ds[2] == ('test\n', 'Test3\n')
def test_errors(self):
ds = self._dataset(
['utf8_1.txt'], encoding='ascii', errors='ignore')
assert ds[0] == '1\n' # "テスト" is ignored
assert ds[1] == '2\n'
assert ds[2] == 'Test3\n'
def test_newline(self):
# CRLF
ds = self._dataset(['utf8_crlf.txt'], encoding='utf-8', newline='\r\n')
assert ds[0] == 'テスト1\r\n'
assert ds[1] == 'テスト2\r\n'
assert ds[2] == 'Test3\r\n'
def test_filter(self):
def _filter(line):
return line != 'world\n'
ds = self._dataset(['ascii_1.txt'], filter_func=_filter)
assert len(ds) == 2
assert ds[0] == 'hello\n'
assert ds[1] == 'test\n'
def test_filter_multiple(self):
def _filter(s1, s2):
return s1 != 'world\n' and 'test' in s2
ds = self._dataset(['ascii_1.txt', 'ascii_2.txt'], filter_func=_filter)
assert len(ds) == 2
assert ds[0] == ('hello\n', 'test file\n')
assert ds[1] == ('test\n', 'world test\n')
def test_pickle_unpickle(self):
ds1 = self._dataset(['utf8_1.txt', 'utf8_2.txt'], encoding='utf-8')
assert ds1[0] == ('テスト1\n', 'Test1\n')
ds2 = pickle.loads(pickle.dumps(ds1))
assert ds1[1] == ('テスト2\n', 'テスト2\n')
assert ds2[1] == ('テスト2\n', 'テスト2\n')
testing.run_module(__name__, __file__)
| 4,703
| 29.947368
| 79
|
py
|
chainer
|
chainer-master/tests/chainer_tests/datasets_tests/test_tuple_dataset.py
|
import unittest
import numpy
from chainer.backends import cuda
from chainer import datasets
from chainer import testing
from chainer.testing import attr
class TestTupleDataset(unittest.TestCase):
def setUp(self):
self.x0 = numpy.random.rand(3, 4)
self.x1 = numpy.random.rand(3, 5)
self.z0 = numpy.random.rand(4, 4)
def check_tuple_dataset(self, x0, x1):
td = datasets.TupleDataset(x0, x1)
self.assertEqual(len(td), len(x0))
for i in range(len(x0)):
example = td[i]
self.assertEqual(len(example), 2)
numpy.testing.assert_array_equal(
cuda.to_cpu(example[0]), cuda.to_cpu(x0[i]))
numpy.testing.assert_array_equal(
cuda.to_cpu(example[1]), cuda.to_cpu(x1[i]))
example_range = td[0: len(x0)]
for i in range(len(x0)):
example = example_range[i]
self.assertEqual(len(example), 2)
numpy.testing.assert_array_equal(
cuda.to_cpu(example[0]), cuda.to_cpu(x0[i]))
numpy.testing.assert_array_equal(
cuda.to_cpu(example[1]), cuda.to_cpu(x1[i]))
def test_tuple_dataset_cpu(self):
self.check_tuple_dataset(self.x0, self.x1)
@attr.gpu
def test_tuple_dataset_gpu(self):
self.check_tuple_dataset(cuda.to_gpu(self.x0), cuda.to_gpu(self.x1))
def test_tuple_dataset_len_mismatch(self):
with self.assertRaises(ValueError):
datasets.TupleDataset(self.x0, self.z0)
def test_tuple_dataset_overrun(self):
td = datasets.TupleDataset(self.x0, self.x1)
with self.assertRaises(IndexError):
td[3]
testing.run_module(__name__, __file__)
| 1,737
| 28.457627
| 76
|
py
|