repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnetd.py | """
ResNet(D) with dilation for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetD', 'resnetd50b', 'resnetd101b', 'resnetd152b']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import MultiOutputSequential
from .resnet import ResUnit, ResInitBlock
from .senet import SEInitBlock
class ResNetD(HybridBlock):
    """
    ResNet(D) with dilation model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    ordinary_init : bool, default False
        Whether to use original initial block or SENet one.
    bends : tuple of int, default None
        Numbers of bends for multiple output.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 ordinary_init=False,
                 bends=None,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ResNetD, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Intermediate ("bend") stage outputs are returned only when `bends` is given.
        self.multi_output = (bends is not None)
        with self.name_scope():
            self.features = MultiOutputSequential(prefix="")
            if ordinary_init:
                self.features.add(ResInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
            else:
                # The SENet-style stem doubles the stem width before building it.
                init_block_channels = 2 * init_block_channels
                self.features.add(SEInitBlock(
                    in_channels=in_channels,
                    out_channels=init_block_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Only the first unit of stage 2 (i == 1) downsamples spatially;
                        # the later stages keep resolution and use dilation instead.
                        strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
                        # Dilation grows with stage depth: stage 3 uses 1 (first unit)
                        # then 2; stage 4 uses 2 (first unit) then 4.
                        dilation = (2 ** max(0, i - 1 - int(j == 0)))
                        stage.add(ResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            padding=dilation,
                            dilation=dilation,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                if self.multi_output and ((i + 1) in bends):
                    # Mark the stage so MultiOutputSequential also emits its output.
                    stage.do_output = True
                self.features.add(stage)
            self.features.add(nn.GlobalAvgPool2D())
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        # `features` returns a list: [final feature map, bend outputs...].
        outs = self.features(x)
        x = outs[0]
        x = self.output(x)
        if self.multi_output:
            return [x] + outs[1:]
        else:
            return x
def get_resnetd(blocks,
                conv1_stride=True,
                width_scale=1.0,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create ResNet(D) with dilation model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for every supported nominal depth.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        14: [2, 2, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported ResNet(D) with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    init_block_channels = 64
    # Shallow nets use plain residual blocks; 50+ use 4x bottleneck blocks.
    bottleneck = (blocks >= 50)
    channels_per_layers = [256, 512, 1024, 2048] if bottleneck else [64, 128, 256, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit width except the very last unit of the last stage,
        # which keeps its nominal width.
        scaled = []
        for i, stage_channels in enumerate(channels):
            row = []
            for j, cij in enumerate(stage_channels):
                keep_nominal = (i == len(channels) - 1) and (j == len(stage_channels) - 1)
                row.append(cij if keep_nominal else int(cij * width_scale))
            scaled.append(row)
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)
    net = ResNetD(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def resnetd50b(**kwargs):
    """
    ResNet(D)-50 with dilation, placing the stride in the second convolution of each bottleneck
    block, from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(blocks=50, conv1_stride=False, model_name="resnetd50b", **kwargs)
def resnetd101b(**kwargs):
    """
    ResNet(D)-101 with dilation, placing the stride in the second convolution of each bottleneck
    block, from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(blocks=101, conv1_stride=False, model_name="resnetd101b", **kwargs)
def resnetd152b(**kwargs):
    """
    ResNet(D)-152 with dilation, placing the stride in the second convolution of each bottleneck
    block, from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnetd(blocks=152, conv1_stride=False, model_name="resnetd152b", **kwargs)
def _test():
    """
    Smoke test: build each ResNet(D) variant, check the trainable parameter count,
    and run one dummy 224x224 batch through the network.
    """
    import numpy as np
    import mxnet as mx

    ordinary_init = False
    bends = None
    pretrained = False

    # Reference parameter counts depend on which stem is used.
    if ordinary_init:
        expected_widths = {
            resnetd50b: 25557032,
            resnetd101b: 44549160,
            resnetd152b: 60192808,
        }
    else:
        expected_widths = {
            resnetd50b: 25680808,
            resnetd101b: 44672936,
            resnetd152b: 60316584,
        }

    for model in (resnetd50b, resnetd101b, resnetd152b):
        net = model(
            pretrained=pretrained,
            ordinary_init=ordinary_init,
            bends=bends)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count only initialized, trainable parameters.
        weight_count = sum(np.prod(param.shape) for param in net.collect_params().values()
                           if param.shape is not None and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_widths[model])

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        if bends is not None:
            # With bends the network returns [main output, bend outputs...].
            y = y[0]
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 10,821 | 34.250814 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/quartznet.py | """
QuartzNet for ASR, implemented in Gluon.
Original paper: 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
https://arxiv.org/abs/1910.10261.
"""
__all__ = ['quartznet5x5_en_ls', 'quartznet15x5_en', 'quartznet15x5_en_nr', 'quartznet15x5_fr', 'quartznet15x5_de',
'quartznet15x5_it', 'quartznet15x5_es', 'quartznet15x5_ca', 'quartznet15x5_pl', 'quartznet15x5_ru',
'quartznet15x5_ru34']
from .jasper import get_jasper
def quartznet5x5_en_ls(classes=29, **kwargs):
    """
    QuartzNet 5x5 ASR model for English (trained on the LibriSpeech dataset) from 'QuartzNet: Deep
    Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
    https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Space, lower-case Latin alphabet, and apostrophe.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
    return get_jasper(classes=classes, version=("quartznet", "5x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet5x5_en_ls", **kwargs)
def quartznet15x5_en(classes=29, **kwargs):
    """
    QuartzNet 15x5 ASR model for English from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Space, lower-case Latin alphabet, and apostrophe.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_en", **kwargs)
def quartznet15x5_en_nr(classes=29, **kwargs):
    """
    QuartzNet 15x5 ASR model for English, trained with noise robustness in mind, from 'QuartzNet:
    Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
    https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Space, lower-case Latin alphabet, and apostrophe.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', "'"]
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_en_nr", **kwargs)
def quartznet15x5_fr(classes=43, **kwargs):
    """
    QuartzNet 15x5 ASR model for French from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 43
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Latin alphabet plus the French accented letters.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'ç', 'é', 'â', 'ê', 'î', 'ô', 'û', 'à', 'è', 'ù', 'ë', 'ï',
                 'ü', 'ÿ']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_fr", **kwargs)
def quartznet15x5_de(classes=32, **kwargs):
    """
    QuartzNet 15x5 ASR model for German from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 32
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Latin alphabet plus German umlauts and sharp s.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', 'ä', 'ö', 'ü', 'ß']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_de", **kwargs)
def quartznet15x5_it(classes=39, **kwargs):
    """
    QuartzNet 15x5 ASR model for Italian from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Latin alphabet plus Italian accented vowels.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ì', 'î', 'ó', 'ò', 'ú', 'ù']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_it", **kwargs)
def quartznet15x5_es(classes=36, **kwargs):
    """
    QuartzNet 15x5 ASR model for Spanish from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 36
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Latin alphabet plus Spanish accented vowels, enye and u-dieresis.
    graphemes = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                 't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'á', 'é', 'í', 'ó', 'ú', 'ñ', 'ü']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_es", **kwargs)
def quartznet15x5_ca(classes=39, **kwargs):
    """
    QuartzNet 15x5 model for Catalan language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 39
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Catalan grapheme set (note the interpunct-l 'ŀ'); the docstring previously
    # said "Spanish" — the model name and vocabulary indicate Catalan.
    vocabulary = [' ', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
                  't', 'u', 'v', 'w', 'x', 'y', 'z', "'", 'à', 'é', 'è', 'í', 'ï', 'ó', 'ò', 'ú', 'ü', 'ŀ']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ca", **kwargs)
def quartznet15x5_pl(classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Polish language from 'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel
    Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Polish grapheme set; the docstring previously said "Spanish" — the model
    # name and vocabulary indicate Polish.
    vocabulary = [' ', 'a', 'ą', 'b', 'c', 'ć', 'd', 'e', 'ę', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'ł', 'm', 'n', 'ń',
                  'o', 'ó', 'p', 'r', 's', 'ś', 't', 'u', 'w', 'y', 'z', 'ź', 'ż']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_pl", **kwargs)
def quartznet15x5_ru(classes=35, **kwargs):
    """
    QuartzNet 15x5 ASR model for Russian from 'QuartzNet: Deep Automatic Speech Recognition with
    1D Time-Channel Separable Convolutions,' https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 35
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Full Russian Cyrillic alphabet, including 'ё'.
    graphemes = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с',
                 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=graphemes,
                      model_name="quartznet15x5_ru", **kwargs)
def quartznet15x5_ru34(classes=34, **kwargs):
    """
    QuartzNet 15x5 model for Russian language (reduced 34-class grapheme set, without 'ё') from
    'QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions,'
    https://arxiv.org/abs/1910.10261.
    Parameters:
    ----------
    classes : int, default 34
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Same alphabet as quartznet15x5_ru but without 'ё' (33 graphemes listed here;
    # presumably the extra class is the CTC blank — TODO confirm against get_jasper).
    vocabulary = [' ', 'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т',
                  'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я']
    return get_jasper(classes=classes, version=("quartznet", "15x5"), use_dw=True, vocabulary=vocabulary,
                      model_name="quartznet15x5_ru34", **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """
    Smoke test: build every QuartzNet variant, verify its trainable parameter count,
    and run one random batch (raw audio or feature input) through the network.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False
    from_audio = True
    audio_features = 64

    models = [
        quartznet5x5_en_ls,
        quartznet15x5_en,
        quartznet15x5_en_nr,
        quartznet15x5_fr,
        quartznet15x5_de,
        quartznet15x5_it,
        quartznet15x5_es,
        quartznet15x5_ca,
        quartznet15x5_pl,
        quartznet15x5_ru,
        quartznet15x5_ru34,
    ]

    for model in models:
        net = model(
            in_channels=audio_features,
            from_audio=from_audio,
            pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != quartznet5x5_en_ls or weight_count == 6713181)
        assert (model != quartznet15x5_en or weight_count == 18924381)
        assert (model != quartznet15x5_en_nr or weight_count == 18924381)
        assert (model != quartznet15x5_fr or weight_count == 18938731)
        assert (model != quartznet15x5_de or weight_count == 18927456)
        assert (model != quartznet15x5_it or weight_count == 18934631)
        assert (model != quartznet15x5_es or weight_count == 18931556)
        assert (model != quartznet15x5_ca or weight_count == 18934631)
        assert (model != quartznet15x5_pl or weight_count == 18929506)
        assert (model != quartznet15x5_ru or weight_count == 18930531)
        assert (model != quartznet15x5_ru34 or weight_count == 18929506)

        batch = 3
        # Raw-audio inputs are longer than feature inputs by this factor
        # (presumably samples per spectrogram frame — TODO confirm against jasper.py).
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max)
        x = mx.nd.random.normal(shape=x_shape, ctx=ctx)
        # Fix: `np.long` was removed in NumPy 1.24 — use the explicit int64 dtype.
        x_len = mx.nd.array(seq_len, ctx=ctx, dtype=np.int64)

        y, y_len = net(x, x_len)
        assert (y.shape[:2] == (batch, net.classes))
        if from_audio:
            assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 14,466 | 42.185075 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/preresnet.py | """
PreResNet for ImageNet-1K, implemented in Gluon.
Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'PreResBlock', 'PreResBottleneck',
'PreResUnit', 'PreResInitBlock', 'PreResActivation']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1
class PreResBlock(HybridBlock):
    """
    Simple PreResNet block for residual path in PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 bn_use_global_stats=False,
                 **kwargs):
        super(PreResBlock, self).__init__(**kwargs)
        with self.name_scope():
            # First pre-activated 3x3 conv; `return_preact=True` makes it also
            # return the pre-activation tensor for the enclosing unit's shortcut.
            self.conv1 = pre_conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats,
                return_preact=True)
            self.conv2 = pre_conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        # Returns both the block output and the pre-activation of its input,
        # which PreResUnit uses to build the projection shortcut.
        x, x_pre_activ = self.conv1(x)
        x = self.conv2(x)
        return x, x_pre_activ
class PreResBottleneck(HybridBlock):
    """
    PreResNet bottleneck block for residual path in PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 conv1_stride,
                 **kwargs):
        super(PreResBottleneck, self).__init__(**kwargs)
        # Classic 4x bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand.
        mid_channels = out_channels // 4
        with self.name_scope():
            # `return_preact=True`: also expose the pre-activation tensor so the
            # enclosing unit can feed it to the projection shortcut.
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=(strides if conv1_stride else 1),
                bn_use_global_stats=bn_use_global_stats,
                return_preact=True)
            # The spatial stride lives in exactly one of conv1/conv2.
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=(1 if conv1_stride else strides),
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x, x_pre_activ = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x, x_pre_activ
class PreResUnit(HybridBlock):
    """
    PreResNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 bn_use_global_stats=False,
                 bottleneck=True,
                 conv1_stride=False,
                 **kwargs):
        super(PreResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = PreResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=conv1_stride)
            else:
                self.body = PreResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_bias=use_bias)
    def hybrid_forward(self, F, x):
        identity = x
        x, x_pre_activ = self.body(x)
        if self.resize_identity:
            # Pre-activation ResNet: the projection shortcut is applied to the
            # pre-activated tensor, not to the raw input.
            identity = self.identity_conv(x_pre_activ)
        x = x + identity
        return x
class PreResInitBlock(HybridBlock):
    """
    PreResNet specific initial block.
    Stem: 7x7/2 convolution, BatchNorm, ReLU, then 3x3/2 max-pooling
    (post-activation; only the residual units are pre-activated).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(PreResInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=7,
                strides=2,
                padding=3,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.activ(x)
        x = self.pool(x)
        return x
class PreResActivation(HybridBlock):
    """
    PreResNet pure pre-activation block without convolution layer. It's used by itself as the final block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(PreResActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # BN + ReLU only: pre-activation networks need a final activation
        # after the last residual unit.
        x = self.bn(x)
        x = self.activ(x)
        return x
class PreResNet(HybridBlock):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(PreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(PreResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                self.features.add(stage)
            # Final BN + ReLU required by the pre-activation design.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # Fixed 7x7 average pooling — matches the default 224x224 input size.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_preresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create PreResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # By convention, depths of 50 and above use bottleneck blocks.
        bottleneck = (blocks >= 50)
    # Per-stage unit counts; a few depths depend on the block type.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        14: [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        26: [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    if bottleneck:
        # PreResNet-BC-38 exists only in the bottleneck flavour.
        depth_to_layers[38] = [3, 3, 3, 3]
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    # Sanity check: unit count must account exactly for the nominal depth
    # (3 convs per bottleneck unit, 2 per simple unit, plus stem and classifier).
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [ci * 4 for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit width except the very last unit of the last stage,
        # which keeps its nominal width.
        scaled = []
        for i, stage_channels in enumerate(channels):
            row = []
            for j, cij in enumerate(stage_channels):
                keep_nominal = (i == len(channels) - 1) and (j == len(stage_channels) - 1)
                row.append(cij if keep_nominal else int(cij * width_scale))
            scaled.append(row)
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)
    net = PreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def preresnet10(**kwargs):
    """
    Build the PreResNet-10 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=10, model_name="preresnet10", **kwargs)
    return net
def preresnet12(**kwargs):
    """
    Build the PreResNet-12 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=12, model_name="preresnet12", **kwargs)
    return net
def preresnet14(**kwargs):
    """
    Build the PreResNet-14 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=14, model_name="preresnet14", **kwargs)
    return net
def preresnetbc14b(**kwargs):
    """
    Build the PreResNet-BC-14b network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental
    bottleneck-compressed model with the stride in the second convolution.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs)
    return net
def preresnet16(**kwargs):
    """
    Build the PreResNet-16 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=16, model_name="preresnet16", **kwargs)
    return net
def preresnet18_wd4(**kwargs):
    """
    Build PreResNet-18 at 0.25 width scale ('Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=18, width_scale=0.25, model_name="preresnet18_wd4", **kwargs)
    return net
def preresnet18_wd2(**kwargs):
    """
    Build PreResNet-18 at 0.5 width scale ('Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=18, width_scale=0.5, model_name="preresnet18_wd2", **kwargs)
    return net
def preresnet18_w3d4(**kwargs):
    """
    Build PreResNet-18 at 0.75 width scale ('Identity Mappings in Deep
    Residual Networks,' https://arxiv.org/abs/1603.05027). Experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=18, width_scale=0.75, model_name="preresnet18_w3d4", **kwargs)
    return net
def preresnet18(**kwargs):
    """
    Build the PreResNet-18 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=18, model_name="preresnet18", **kwargs)
    return net
def preresnet26(**kwargs):
    """
    Build the PreResNet-26 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental
    non-bottleneck model.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs)
    return net
def preresnetbc26b(**kwargs):
    """
    Build the PreResNet-BC-26b network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental
    bottleneck-compressed model with the stride in the second convolution.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs)
    return net
def preresnet34(**kwargs):
    """
    Build the PreResNet-34 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=34, model_name="preresnet34", **kwargs)
    return net
def preresnetbc38b(**kwargs):
    """
    Build the PreResNet-BC-38b network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027). Experimental
    bottleneck-compressed model with the stride in the second convolution.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs)
    return net
def preresnet50(**kwargs):
    """
    Build the PreResNet-50 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=50, model_name="preresnet50", **kwargs)
    return net
def preresnet50b(**kwargs):
    """
    Build PreResNet-50 with the stride moved to the second convolution of the
    bottleneck block ('Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=50, conv1_stride=False, model_name="preresnet50b", **kwargs)
    return net
def preresnet101(**kwargs):
    """
    Build the PreResNet-101 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=101, model_name="preresnet101", **kwargs)
    return net
def preresnet101b(**kwargs):
    """
    Build PreResNet-101 with the stride moved to the second convolution of
    the bottleneck block ('Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=101, conv1_stride=False, model_name="preresnet101b", **kwargs)
    return net
def preresnet152(**kwargs):
    """
    Build the PreResNet-152 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=152, model_name="preresnet152", **kwargs)
    return net
def preresnet152b(**kwargs):
    """
    Build PreResNet-152 with the stride moved to the second convolution of
    the bottleneck block ('Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=152, conv1_stride=False, model_name="preresnet152b", **kwargs)
    return net
def preresnet200(**kwargs):
    """
    Build the PreResNet-200 network ('Identity Mappings in Deep Residual
    Networks,' https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=200, model_name="preresnet200", **kwargs)
    return net
def preresnet200b(**kwargs):
    """
    Build PreResNet-200 with the stride moved to the second convolution of
    the bottleneck block ('Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=200, conv1_stride=False, model_name="preresnet200b", **kwargs)
    return net
def preresnet269b(**kwargs):
    """
    Build PreResNet-269 with the stride moved to the second convolution of
    the bottleneck block ('Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027).

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    net = get_preresnet(blocks=269, conv1_stride=False, model_name="preresnet269b", **kwargs)
    return net
def _test():
    """Smoke-test every PreResNet variant: weight count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    # Expected number of trainable weights for each model constructor.
    expected_counts = {
        preresnet10: 5417128,
        preresnet12: 5491112,
        preresnet14: 5786536,
        preresnetbc14b: 10057384,
        preresnet16: 6967208,
        preresnet18_wd4: 3935960,
        preresnet18_wd2: 5802440,
        preresnet18_w3d4: 8473784,
        preresnet18: 11687848,
        preresnet26: 17958568,
        preresnetbc26b: 15987624,
        preresnet34: 21796008,
        preresnetbc38b: 21917864,
        preresnet50: 25549480,
        preresnet50b: 25549480,
        preresnet101: 44541608,
        preresnet101b: 44541608,
        preresnet152: 60185256,
        preresnet152b: 60185256,
        preresnet200: 64666280,
        preresnet200b: 64666280,
        preresnet269b: 102065832,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()

        # Count only differentiable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        # Forward pass on a dummy ImageNet-sized batch.
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert y.shape == (1, 1000)
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 31,235 | 34.175676 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/lednet.py | """
LEDNet for image segmentation, implemented in Gluon.
Original paper: 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
https://arxiv.org/abs/1905.02423.
"""
__all__ = ['LEDNet', 'lednet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, conv1x1_block, conv3x3_block, conv5x5_block, conv7x7_block, ConvBlock, NormActivation,\
ChannelShuffle, InterpolationBlock, Hourglass, BreakBlock
class AsymConvBlock(HybridBlock):
    """
    Asymmetric separable convolution block: a (k x 1) convolution over the
    height axis followed by a (1 x k) convolution over the width axis, each
    with its own optional BatchNorm and activation.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    kernel_size : int
        Convolution window size.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    lw_use_bn : bool, default True
        Whether to use BatchNorm layer (leftwise convolution block).
    rw_use_bn : bool, default True
        Whether to use BatchNorm layer (rightwise convolution block).
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    lw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the leftwise convolution block.
    rw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the rightwise convolution block.
    """
    def __init__(self,
                 channels,
                 kernel_size,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 lw_use_bn=True,
                 rw_use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 lw_activation=(lambda: nn.Activation("relu")),
                 rw_activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(AsymConvBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Leftwise branch: (kernel_size x 1) convolution along the height axis.
            self.lw_conv = ConvBlock(
                in_channels=channels,
                out_channels=channels,
                kernel_size=(kernel_size, 1),
                strides=1,
                padding=(padding, 0),
                dilation=(dilation, 1),
                groups=groups,
                use_bias=use_bias,
                use_bn=lw_use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=lw_activation)
            # Rightwise branch: (1 x kernel_size) convolution along the width axis.
            self.rw_conv = ConvBlock(
                in_channels=channels,
                out_channels=channels,
                kernel_size=(1, kernel_size),
                strides=1,
                padding=(0, padding),
                dilation=(1, dilation),
                groups=groups,
                use_bias=use_bias,
                use_bn=rw_use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=rw_activation)

    def hybrid_forward(self, F, x):
        # Apply the two 1-D convolutions sequentially; together they cover a
        # full k x k receptive field at reduced parameter count.
        x = self.lw_conv(x)
        x = self.rw_conv(x)
        return x
def asym_conv3x3_block(padding=1,
                       **kwargs):
    """
    Build a 3x3 asymmetric separable convolution block.

    Parameters:
    ----------
    padding : int, default 1
        Padding value for the convolution layers.
    **kwargs
        Remaining `AsymConvBlock` options: channels, dilation, groups,
        use_bias, lw_use_bn / rw_use_bn, bn_epsilon, bn_use_global_stats,
        bn_cudnn_off, lw_activation / rw_activation.
    """
    block = AsymConvBlock(
        kernel_size=3,
        padding=padding,
        **kwargs)
    return block
class LEDDownBlock(HybridBlock):
    """
    LEDNet specific downscale block: a stride-2 max-pool branch and a
    stride-2 3x3 convolution branch, concatenated channel-wise and then
    normalized/activated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    correct_size_mismatch : bool
        Whether to correct downscaled sizes of images.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 correct_size_mismatch,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(LEDDownBlock, self).__init__(**kwargs)
        self.correct_size_mismatch = correct_size_mismatch
        with self.name_scope():
            self.pool = nn.MaxPool2D(
                pool_size=2,
                strides=2)
            # The conv branch produces only the extra channels so that the
            # concatenation with the pooled input yields `out_channels`.
            self.conv = conv3x3(
                in_channels=in_channels,
                out_channels=(out_channels - in_channels),
                strides=2,
                use_bias=True)
            self.norm_activ = NormActivation(
                in_channels=out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)

    def hybrid_forward(self, F, x):
        y1 = self.pool(x)
        y2 = self.conv(x)

        if self.correct_size_mismatch:
            # Pad the pooled branch so both branches agree spatially for odd
            # input sizes.
            # Bug fix: MXNet NDArrays expose `.shape`; `.size()` is the
            # PyTorch API (in MXNet `size` is an int property, not callable),
            # so the original `y2.size()[2]` raised a TypeError.
            diff_h = y2.shape[2] - y1.shape[2]
            diff_w = y2.shape[3] - y1.shape[3]
            # Bug fix: for NCHW input, F.pad orders pad_width as
            # (N, N, C, C, H_before, H_after, W_before, W_after) — the height
            # pair comes before the width pair.
            y1 = F.pad(
                y1,
                mode="constant",
                pad_width=(0, 0, 0, 0, diff_h // 2, diff_h - diff_h // 2, diff_w // 2, diff_w - diff_w // 2),
                constant_value=0)

        x = F.concat(y2, y1, dim=1)
        x = self.norm_activ(x)
        return x
class LEDBranch(HybridBlock):
    """
    LEDNet encoder branch: two asymmetric 3x3 convolution blocks (the second
    dilated and without a final activation), followed by optional dropout.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(LEDBranch, self).__init__(**kwargs)
        # Dropout is skipped entirely when the rate is zero.
        self.use_dropout = (dropout_rate != 0.0)
        with self.name_scope():
            self.conv1 = asym_conv3x3_block(
                channels=channels,
                use_bias=True,
                lw_use_bn=False,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Second block is dilated; no activation after it (the enclosing
            # unit applies ReLU after the residual addition).
            self.conv2 = asym_conv3x3_block(
                channels=channels,
                padding=dilation,
                dilation=dilation,
                use_bias=True,
                lw_use_bn=False,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                rw_activation=None)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x
class LEDUnit(HybridBlock):
    """
    LEDNet encoder unit (Split-Shuffle-non-bottleneck): the input is split in
    half along channels, each half goes through its own branch, the halves
    are re-concatenated, added to the identity, activated, and
    channel-shuffled.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for convolution layer.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 channels,
                 dilation,
                 dropout_rate,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(LEDUnit, self).__init__(**kwargs)
        # Each branch processes half of the channels.
        mid_channels = channels // 2
        with self.name_scope():
            self.left_branch = LEDBranch(
                channels=mid_channels,
                dilation=dilation,
                dropout_rate=dropout_rate,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.right_branch = LEDBranch(
                channels=mid_channels,
                dilation=dilation,
                dropout_rate=dropout_rate,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.activ = nn.Activation("relu")
            # Shuffle mixes information between the two halves for the next unit.
            self.shuffle = ChannelShuffle(
                channels=channels,
                groups=2)

    def hybrid_forward(self, F, x):
        identity = x

        # Split channels in half, process each half independently.
        x1, x2 = F.split(x, axis=1, num_outputs=2)
        x1 = self.left_branch(x1)
        x2 = self.right_branch(x2)
        x = F.concat(x1, x2, dim=1)

        # Residual connection, then activation and channel shuffle.
        x = x + identity
        x = self.activ(x)
        x = self.shuffle(x)
        return x
class PoolingBranch(HybridBlock):
    """
    Pooling branch: adaptive average pool to a small spatial size, 1x1
    convolution, then bilinear upsampling back to the input resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bias : bool
        Whether the layer uses a bias vector.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool
        Whether to disable CUDNN batch normalization operator.
    in_size : tuple of 2 int or None
        Spatial size of input image. If None, the size is read from the
        input tensor at runtime (non-hybridizable path).
    down_size : int
        Spatial size of downscaled image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias,
                 bn_epsilon,
                 bn_use_global_stats,
                 bn_cudnn_off,
                 in_size,
                 down_size,
                 **kwargs):
        super(PoolingBranch, self).__init__(**kwargs)
        self.in_size = in_size
        self.down_size = down_size
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.up = InterpolationBlock(
                scale_factor=None,
                out_size=in_size)

    def hybrid_forward(self, F, x):
        # Fall back to the dynamic input size when a fixed size was not given.
        in_size = self.in_size if self.in_size is not None else x.shape[2:]
        x = F.contrib.AdaptiveAvgPooling2D(x, output_size=self.down_size)
        x = self.conv(x)
        x = self.up(x, in_size)
        return x
class APN(HybridBlock):
    """
    Attention pyramid network block: a global pooling branch, a 1x1 body
    branch, and a single-channel hourglass attention map that multiplicatively
    gates the body output before the pooling branch is added back.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool
        Whether to disable CUDNN batch normalization operator.
    in_size : tuple of 2 int or None
        Spatial size of input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 bn_cudnn_off,
                 in_size,
                 **kwargs):
        super(APN, self).__init__(**kwargs)
        self.in_size = in_size
        # The attention pyramid works on a single-channel map.
        att_out_channels = 1
        with self.name_scope():
            # Global-context branch: pool to 1x1, project, upsample.
            self.pool_branch = PoolingBranch(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                in_size=in_size,
                down_size=1)
            # Main 1x1 projection that the attention map will gate.
            self.body = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Hourglass downscale path: 7x7/2 -> 5x5/2 -> (3x3/2 + 3x3).
            down_seq = nn.HybridSequential(prefix="")
            down_seq.add(conv7x7_block(
                in_channels=in_channels,
                out_channels=att_out_channels,
                strides=2,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            down_seq.add(conv5x5_block(
                in_channels=att_out_channels,
                out_channels=att_out_channels,
                strides=2,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            down3_subseq = nn.HybridSequential(prefix="")
            down3_subseq.add(conv3x3_block(
                in_channels=att_out_channels,
                out_channels=att_out_channels,
                strides=2,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            down3_subseq.add(conv3x3_block(
                in_channels=att_out_channels,
                out_channels=att_out_channels,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            down_seq.add(down3_subseq)
            # Upscale path: the same parameter-free 2x interpolation block is
            # reused for all three levels.
            up_seq = nn.HybridSequential(prefix="")
            up = InterpolationBlock(scale_factor=2)
            up_seq.add(up)
            up_seq.add(up)
            up_seq.add(up)
            # Skip connections: identity at the deepest level, then 7x7 and
            # 5x5 convolutions at the shallower levels.
            skip_seq = nn.HybridSequential(prefix="")
            skip_seq.add(BreakBlock())
            skip_seq.add(conv7x7_block(
                in_channels=att_out_channels,
                out_channels=att_out_channels,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            skip_seq.add(conv5x5_block(
                in_channels=att_out_channels,
                out_channels=att_out_channels,
                use_bias=True,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            self.hg = Hourglass(
                down_seq=down_seq,
                up_seq=up_seq,
                skip_seq=skip_seq)

    def hybrid_forward(self, F, x):
        y = self.pool_branch(x)  # global-context term
        w = self.hg(x)           # single-channel attention map
        x = self.body(x)
        x = x * w                # gate the body with the attention map
        x = x + y                # add the global context back
        return x
class LEDNet(HybridBlock):
    """
    LEDNet model from 'LEDNet: A Lightweight Encoder-Decoder Network for Real-Time Semantic Segmentation,'
    https://arxiv.org/abs/1905.02423.

    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    dilations : list of list of int
        Dilations for units. The first entry of each stage marks its
        downscale block; the rest configure LEDUnit dilations.
    dropout_rates : list of list of int
        Dropout rates for each unit in encoder.
    correct_size_mismatch : bool, default False
        Whether to correct downscaled sizes of images in encoder.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 dilations,
                 dropout_rates,
                 correct_size_mismatch=False,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 **kwargs):
        super(LEDNet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # The encoder downscales by 8x, so the input must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size

        with self.name_scope():
            self.encoder = nn.HybridSequential(prefix="")
            for i, dilations_per_stage in enumerate(dilations):
                out_channels = channels[i]
                dropout_rate = dropout_rates[i]
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                for j, dilation in enumerate(dilations_per_stage):
                    if j == 0:
                        # First unit of every stage downscales by 2x.
                        stage.add(LEDDownBlock(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            correct_size_mismatch=correct_size_mismatch,
                            bn_epsilon=bn_epsilon,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off))
                        in_channels = out_channels
                    else:
                        stage.add(LEDUnit(
                            channels=in_channels,
                            dilation=dilation,
                            dropout_rate=dropout_rate,
                            bn_epsilon=bn_epsilon,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off))
                self.encoder.add(stage)
            # Decoder: attention pyramid head at 1/8 resolution, then 8x upsample.
            self.apn = APN(
                in_channels=in_channels,
                out_channels=classes,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                in_size=(in_size[0] // 8, in_size[1] // 8) if fixed_size else None)
            self.up = InterpolationBlock(scale_factor=8)

    def hybrid_forward(self, F, x):
        x = self.encoder(x)
        x = self.apn(x)
        x = self.up(x)
        return x
def get_lednet(model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create LEDNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Encoder configuration: per-stage channels, unit dilations (0 marks the
    # downscale block opening each stage) and per-stage dropout rates.
    net = LEDNet(
        channels=[32, 64, 128],
        dilations=[[0, 1, 1, 1], [0, 1, 1], [0, 1, 2, 5, 9, 2, 5, 9, 17]],
        dropout_rates=[0.03, 0.03, 0.3],
        bn_epsilon=1e-3,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx,
            ignore_extra=True)

    return net
def lednet_cityscapes(classes=19, **kwargs):
    """
    Build LEDNet for Cityscapes ('LEDNet: A Lightweight Encoder-Decoder
    Network for Real-Time Semantic Segmentation,'
    https://arxiv.org/abs/1905.02423).

    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Load pretrained weights if True.
    ctx : Context, default CPU
        Context used for the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are stored.
    """
    return get_lednet(model_name="lednet_cityscapes", classes=classes, **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke-test LEDNet: weight count and output shape on a dummy batch."""
    import mxnet as mx

    pretrained = False
    fixed_size = True
    correct_size_mismatch = False
    in_size = (1024, 2048)
    classes = 19

    for model in (lednet_cityscapes,):
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size,
                    correct_size_mismatch=correct_size_mismatch)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != lednet_cityscapes or weight_count == 922821)

        batch = 4
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        assert y.shape == (batch, classes, in_size[0], in_size[1])
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 24,449 | 33.48519 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/superpointnet.py | """
SuperPointNet for HPatches (image matching), implemented in Gluon.
Original paper: 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
https://arxiv.org/abs/1712.07629.
"""
__all__ = ['SuperPointNet', 'superpointnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
def interpolate_bilinear(p,
                         img,
                         img_size,
                         transpose=True):
    """
    Bilinear interpolation.

    Parameters:
    ----------
    p : NDArray
        Float coordinates, one (x, y) pair per row.
    img : NDArray
        Original image, indexed as img[channel, x, y].
    img_size : tuple of two ints
        Image size.
    transpose : bool, default True
        Whether do transpose of the output against input.

    Returns:
    -------
    zz : NDArray
        Interpolated values.
    """
    # Integer corners of the cell containing each query point.
    p0 = p.floor().astype(int)
    p1 = p0 + 1

    x = p.slice_axis(axis=1, begin=0, end=1).squeeze(axis=1)
    y = p.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1)

    x0 = p0.slice_axis(axis=1, begin=0, end=1).squeeze(axis=1)
    x1 = p1.slice_axis(axis=1, begin=0, end=1).squeeze(axis=1)
    y0 = p0.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1)
    y1 = p1.slice_axis(axis=1, begin=1, end=2).squeeze(axis=1)

    # Clamp corner coordinates to the image bounds.
    x0 = x0.clip(0, img_size[0] - 1)
    x1 = x1.clip(0, img_size[0] - 1)
    y0 = y0.clip(0, img_size[1] - 1)
    y1 = y1.clip(0, img_size[1] - 1)

    z00 = img[:, x0, y0].T
    z01 = img[:, x0, y1].T
    z10 = img[:, x1, y0].T
    z11 = img[:, x1, y1].T

    x0 = x0.astype(p.dtype)
    x1 = x1.astype(p.dtype)
    y0 = y0.astype(p.dtype)
    y1 = y1.astype(p.dtype)

    # Each corner value must be weighted by the area of the sub-rectangle
    # between the query point and the OPPOSITE corner.
    w00 = ((x - x0) * (y - y0)).expand_dims(axis=1)
    w01 = ((x - x0) * (y1 - y)).expand_dims(axis=1)
    w10 = ((x1 - x) * (y - y0)).expand_dims(axis=1)
    w11 = ((x1 - x) * (y1 - y)).expand_dims(axis=1)

    # BUGFIX: the cross terms were previously paired as z10*w10 and z01*w01,
    # which weights the (x1, y0) corner by the sub-rectangle ADJACENT to it
    # instead of the one opposite to it (and likewise for (x0, y1)).
    # Correct bilinear interpolation pairs opposite corners:
    #   z00 <-> w11, z10 <-> w01, z01 <-> w10, z11 <-> w00.
    zz = (z00 * w11) + (z10 * w01) + (z01 * w10) + (z11 * w00)

    if not transpose:
        zz = zz.T
    return zz
class SPHead(HybridBlock):
    """
    SuperPointNet head block: a 3x3 conv stage followed by a 1x1 projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 **kwargs):
        super(SPHead, self).__init__(**kwargs)
        with self.name_scope():
            # Biased convolutions without batch-norm, as in the original model.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=True,
                use_bn=False)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)

    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class SPDetector(HybridBlock):
    """
    SuperPointNet detector.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    conf_thresh : float, default 0.015
        Confidence threshold.
    nms_dist : int, default 4
        NMS distance.
    use_batch_box_nms : bool, default True
        Whether to use the batched `box_nms` operator for non-maximum suppression
        (required for hybridization); otherwise a manual greedy NMS is run.
    hybridizable : bool, default True
        Whether allow to hybridize this block.
    batch_size : int, default 1
        Batch size.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    reduction : int, default 8
        Feature reduction factor.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 conf_thresh=0.015,
                 nms_dist=4,
                 use_batch_box_nms=True,
                 hybridizable=True,
                 batch_size=1,
                 in_size=(224, 224),
                 reduction=8,
                 **kwargs):
        super(SPDetector, self).__init__(**kwargs)
        # Hybridization needs static shapes and the symbolic NMS path.
        assert ((batch_size is not None) or not hybridizable)
        assert ((in_size is not None) or not hybridizable)
        assert (use_batch_box_nms or not hybridizable)
        self.conf_thresh = conf_thresh
        self.nms_dist = nms_dist
        self.use_batch_box_nms = use_batch_box_nms
        self.hybridizable = hybridizable
        self.batch_size = batch_size
        self.in_size = in_size
        self.reduction = reduction
        # One class per pixel of a (reduction x reduction) cell plus a
        # "dustbin" (no-keypoint) class.
        num_classes = reduction * reduction + 1

        with self.name_scope():
            self.detector = SPHead(
                in_channels=in_channels,
                mid_channels=mid_channels,
                out_channels=num_classes)

    def hybrid_forward(self, F, x):
        semi = self.detector(x)

        # Softmax over classes, then drop the dustbin channel.
        dense = semi.softmax(axis=1)
        nodust = dense.slice_axis(axis=1, begin=0, end=-1)

        # Unfold the reduction^2 channels back into full-resolution pixels
        # (inverse pixel-shuffle): (B, H/r, W/r, r*r) -> (B, H/r, r, W/r, r).
        heatmap = nodust.transpose(axes=(0, 2, 3, 1))
        heatmap = heatmap.reshape(shape=(0, 0, 0, self.reduction, self.reduction))
        heatmap = heatmap.transpose(axes=(0, 1, 3, 2, 4))

        in_size = self.in_size if self.in_size is not None else (x.shape[2] * self.reduction,
                                                                 x.shape[3] * self.reduction)
        batch_size = self.batch_size if self.batch_size is not None else x.shape[0]

        if self.use_batch_box_nms:
            # Treat every pixel as a square "box" of side nms_dist centered at
            # its coordinates and let the batched box_nms do the suppression.
            heatmap = heatmap.reshape(shape=(0, -1))
            in_nms = F.stack(
                heatmap,
                F.arange(in_size[0], repeat=in_size[1]).tile((batch_size, 1)),
                F.arange(in_size[1]).tile((batch_size, in_size[0])),
                F.zeros_like(heatmap) + self.nms_dist,
                F.zeros_like(heatmap) + self.nms_dist,
                axis=2)
            out_nms = F.contrib.box_nms(
                data=in_nms,
                overlap_thresh=1e-3,
                valid_thresh=self.conf_thresh,
                coord_start=1,
                score_index=0,
                id_index=-1,
                force_suppress=False,
                in_format="center",
                out_format="center")
            confs = out_nms.slice_axis(axis=2, begin=0, end=1).reshape(shape=(0, -1))
            pts = out_nms.slice_axis(axis=2, begin=1, end=3)
            if self.hybridizable:
                return pts, confs
            # Imperative mode: trim the padded NMS output to the number of
            # surviving (positive-confidence) points per sample.
            confs_list = []
            pts_list = []
            counts = (confs > 0).sum(axis=1)
            for i in range(batch_size):
                count_i = int(counts[i].asscalar())
                confs_i = confs[i].slice_axis(axis=0, begin=0, end=count_i)
                pts_i = pts[i].slice_axis(axis=0, begin=0, end=count_i)
                confs_list.append(confs_i)
                pts_list.append(pts_i)
            return pts_list, confs_list
        else:
            # Manual greedy NMS on the sparse heatmap (imperative mode only).
            img_height = in_size[0]
            img_width = in_size[1]
            heatmap = heatmap.reshape(shape=(0, -3, -3)).expand_dims(axis=1)
            # Zero out sub-threshold responses.
            heatmap = F.where(heatmap >= self.conf_thresh, heatmap, F.zeros_like(heatmap))
            heatmap_mask = (heatmap >= 0)
            pad = self.nms_dist
            pad_width = (0, 0, 0, 0, pad, pad, pad, pad)
            # Zero-padded mask so a suppression window never leaves the array.
            heatmap_mask2 = heatmap_mask.pad(mode="constant", pad_width=pad_width, constant_value=0)
            confs_list = []
            pts_list = []
            for i in range(batch_size):
                heatmap_i = heatmap[i].squeeze(axis=0)
                # CSR storage exposes only the nonzero (above-threshold) entries.
                heatmap_i_csr = heatmap_i.tostype("csr")
                row_sizes = heatmap_i_csr.indptr[1:] - heatmap_i_csr.indptr[:-1]
                # Recover the row index of every stored value from indptr.
                row_inds = heatmap_i_csr.data.zeros_like()
                row_size_count = 0
                for j, row_size in enumerate(row_sizes):
                    row_size_j = row_size.asscalar()
                    row_inds[row_size_count:(row_size_count + row_size_j)] = j
                    row_size_count += row_size_j
                # Visit candidates in decreasing confidence order.
                src_inds = heatmap_i_csr.data.argsort(is_ascend=False)
                dst_pts_count = 0
                heatmap_mask2_i = heatmap_mask2[i, 0]
                dst_confs = heatmap_i_csr.data.zeros_like()
                dst_pts = F.stack(dst_confs, dst_confs, axis=1)
                for src_ind in src_inds:
                    src_ind_j = int(src_ind.asscalar())
                    col_j = int(heatmap_i_csr.indices[src_ind_j].asscalar())
                    row_j = int(row_inds[src_ind_j].asscalar())
                    pt = (row_j + pad, col_j + pad)
                    assert (pad <= pt[0] < heatmap_mask2_i.shape[0] - pad)
                    assert (pad <= pt[1] < heatmap_mask2_i.shape[1] - pad)
                    assert (0 <= pt[0] - pad < img_height)
                    assert (0 <= pt[1] - pad < img_width)
                    if heatmap_mask2_i[pt[0], pt[1]] == 1:
                        # Keep this point and suppress its neighborhood.
                        heatmap_mask2_i[(pt[0] - pad):(pt[0] + pad + 1), (pt[1] - pad):(pt[1] + pad + 1)] = 0
                        if (0 <= pt[0] - pad < img_height) and (0 <= pt[1] - pad < img_width):
                            dst_confs[dst_pts_count] = heatmap_i_csr.data[src_ind_j].asscalar()
                            dst_pts[dst_pts_count, 0] = row_j
                            dst_pts[dst_pts_count, 1] = col_j
                            dst_pts_count += 1
                dst_confs = dst_confs[:dst_pts_count]
                dst_pts = dst_pts[:dst_pts_count]
                confs_list.append(dst_confs)
                pts_list.append(dst_pts)
            return pts_list, confs_list
class SPDescriptor(HybridBlock):
    """
    SuperPointNet descriptor generator.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    descriptor_length : int, default 256
        Descriptor length.
    transpose_descriptors : bool, default True
        Whether transpose descriptors with respect to points.
    use_map_resize : bool, default True
        Whether to upsample the coarse descriptor map to the input resolution
        (required for hybridization); otherwise descriptors are sampled at the
        given points via bilinear interpolation.
    hybridizable : bool, default True
        Whether allow to hybridize this block.
    batch_size : int, default 1
        Batch size.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    reduction : int, default 8
        Feature reduction factor.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 descriptor_length=256,
                 transpose_descriptors=True,
                 use_map_resize=True,
                 hybridizable=True,
                 batch_size=1,
                 in_size=(224, 224),
                 reduction=8,
                 **kwargs):
        super(SPDescriptor, self).__init__(**kwargs)
        # Hybridization needs static shapes and the map-resize path.
        assert ((batch_size is not None) or not hybridizable)
        assert ((in_size is not None) or not hybridizable)
        assert (use_map_resize or not hybridizable)
        self.desc_length = descriptor_length
        self.transpose_descriptors = transpose_descriptors
        self.use_map_resize = use_map_resize
        self.hybridizable = hybridizable
        self.batch_size = batch_size
        self.in_size = in_size
        self.reduction = reduction

        with self.name_scope():
            self.head = SPHead(
                in_channels=in_channels,
                mid_channels=mid_channels,
                out_channels=descriptor_length)

    def hybrid_forward(self, F, x, pts):
        coarse_desc_map = self.head(x)
        # L2-normalize descriptors along the channel axis.
        coarse_desc_map = F.L2Normalization(coarse_desc_map, mode="channel")
        in_size = self.in_size if self.in_size is not None else (x.shape[2] * self.reduction,
                                                                 x.shape[3] * self.reduction)
        if self.use_map_resize:
            # Upsample the coarse map to the input resolution and re-normalize.
            desc_map = F.contrib.BilinearResize2D(coarse_desc_map, height=in_size[0], width=in_size[1])
            desc_map = F.L2Normalization(desc_map, mode="channel")
            if not self.transpose_descriptors:
                desc_map = desc_map.transpose(axes=(0, 1, 3, 2))
            desc_map = desc_map.transpose(axes=(0, 2, 3, 1))
            if self.hybridizable:
                return desc_map
            # Imperative mode: gather the descriptor at each detected point.
            batch_size = self.batch_size if self.batch_size is not None else x.shape[0]
            desc_map = desc_map.reshape(shape=(0, -3, 0))
            desc_list = []
            for i in range(batch_size):
                desc_map_i = desc_map[i]
                pts_i_tr = pts[i].transpose()
                # Flatten (row, col) coordinates into linear indices.
                pts_ravel_i = F.ravel_multi_index(pts_i_tr, shape=in_size)
                desc_map_sorted_i = F.take(desc_map_i, pts_ravel_i)
                desc_list.append(desc_map_sorted_i)
            return desc_list
        else:
            # Sample the coarse map directly at the (downscaled) point locations.
            pts0 = (1.0 / self.reduction) * pts
            batch_size = self.batch_size if self.batch_size is not None else x.shape[0]
            desc_list = []
            for i in range(batch_size):
                src_desc_map_i = coarse_desc_map[i]
                pts0_i = pts0[i]
                dst_desc_map_i = interpolate_bilinear(
                    p=pts0_i,
                    img=src_desc_map_i,
                    img_size=(in_size[0] // self.reduction, in_size[1] // self.reduction))
                desc_list.append(dst_desc_map_i)
            return desc_list
class SuperPointNet(HybridBlock):
    """
    SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
    https://arxiv.org/abs/1712.07629.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    final_block_channels : int
        Number of output channels for the final units.
    transpose_descriptors : bool, default True
        Whether transpose descriptors with respect to points.
    hybridizable : bool, default True
        Whether allow to hybridize this block.
    batch_size : int, default 1
        Batch size.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    in_channels : int, default 1
        Number of input channels.
    """
    def __init__(self,
                 channels,
                 final_block_channels,
                 transpose_descriptors=True,
                 hybridizable=True,
                 batch_size=1,
                 in_size=(224, 224),
                 in_channels=1,
                 **kwargs):
        super(SuperPointNet, self).__init__(**kwargs)
        assert ((batch_size is not None) or not hybridizable)
        assert ((in_size is not None) or not hybridizable)
        self.batch_size = batch_size
        self.in_size = in_size

        with self.name_scope():
            # Shared VGG-style backbone: every stage but the first starts with
            # a 2x2 max-pool, followed by bias-only 3x3 conv blocks.
            self.features = nn.HybridSequential(prefix="")
            for stage_idx, stage_channels in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(stage_idx + 1))
                for unit_idx, unit_channels in enumerate(stage_channels):
                    if (unit_idx == 0) and (stage_idx != 0):
                        stage.add(nn.MaxPool2D(
                            pool_size=2,
                            strides=2))
                    stage.add(conv3x3_block(
                        in_channels=in_channels,
                        out_channels=unit_channels,
                        use_bias=True,
                        use_bn=False))
                    in_channels = unit_channels
                self.features.add(stage)
            # Two heads on top of the shared features.
            self.detector = SPDetector(
                in_channels=in_channels,
                mid_channels=final_block_channels,
                hybridizable=hybridizable,
                batch_size=batch_size,
                in_size=in_size)
            self.descriptor = SPDescriptor(
                in_channels=in_channels,
                mid_channels=final_block_channels,
                transpose_descriptors=transpose_descriptors,
                hybridizable=hybridizable,
                batch_size=batch_size,
                in_size=in_size)

    def hybrid_forward(self, F, x):
        features = self.features(x)
        pts, confs = self.detector(features)
        desc_map = self.descriptor(features, pts)
        return pts, confs, desc_map
def get_superpointnet(model_name=None,
                      pretrained=False,
                      ctx=cpu(),
                      root=os.path.join("~", ".mxnet", "models"),
                      **kwargs):
    """
    Create SuperPointNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    layers = [2, 2, 2, 2]
    channels_per_layers = [64, 64, 128, 128]
    # Expand the per-stage widths into one entry per unit.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = SuperPointNet(
        channels=channels,
        final_block_channels=256,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def superpointnet(**kwargs):
    """
    SuperPointNet model from 'SuperPoint: Self-Supervised Interest Point Detection and Description,'
    https://arxiv.org/abs/1712.07629.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Forward everything to the generic builder under the canonical name.
    return get_superpointnet(model_name="superpointnet", **kwargs)
def _test():
    """Smoke test: build SuperPointNet, count weights, run a forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    hybridizable = True
    batch_size = 1
    # in_size = (224, 224)
    in_size = (200, 400)
    # in_size = (1000, 2000)

    models = [
        superpointnet,
    ]

    for model in models:
        net = model(pretrained=pretrained, hybridizable=hybridizable, batch_size=batch_size, in_size=in_size)

        # BUGFIX: this smoke test previously hard-coded `mx.gpu(0)`, which
        # crashes on CPU-only machines; the sibling `_test` helpers in this
        # repository run on the CPU.
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter count for this architecture.
        assert (model != superpointnet or weight_count == 1300865)

        x = mx.nd.random.normal(shape=(batch_size, 1, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        # (points, confidences, descriptor map).
        assert (len(y) == 3)
if __name__ == "__main__":
_test()
| 19,321 | 34.323583 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibndensenet.py | """
IBN-DenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNDenseNet', 'ibn_densenet121', 'ibn_densenet161', 'ibn_densenet169', 'ibn_densenet201']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv3x3_block, IBN
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
class IBNPreConvBlock(HybridBlock):
    """
    IBN-Net specific convolution block with BN/IBN normalization and ReLU pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 use_ibn=False,
                 bn_use_global_stats=False,
                 return_preact=False,
                 **kwargs):
        super(IBNPreConvBlock, self).__init__(**kwargs)
        self.use_ibn = use_ibn
        self.return_preact = return_preact

        with self.name_scope():
            if self.use_ibn:
                # Instance-Batch Normalization over the input channels.
                self.ibn = IBN(
                    channels=in_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    first_fraction=0.6,
                    inst_first=False)
            else:
                self.bn = nn.BatchNorm(
                    in_channels=in_channels,
                    use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=False,
                in_channels=in_channels)

    def hybrid_forward(self, F, x):
        # Pre-activation ordering: normalize (IBN or BN), ReLU, then convolve.
        normed = self.ibn(x) if self.use_ibn else self.bn(x)
        activated = self.activ(normed)
        if self.return_preact:
            # Also expose the post-ReLU tensor captured before the convolution.
            return self.conv(activated), activated
        return self.conv(activated)
def ibn_pre_conv1x1_block(in_channels,
                          out_channels,
                          strides=1,
                          use_ibn=False,
                          bn_use_global_stats=False,
                          return_preact=False):
    """
    1x1 version of the IBN-Net specific pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation.
    """
    # A 1x1 kernel needs no padding.
    return IBNPreConvBlock(
        kernel_size=1,
        padding=0,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        use_ibn=use_ibn,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact)
class IBNDenseUnit(HybridBlock):
    """
    IBN-DenseNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 conv1_ibn,
                 **kwargs):
        super(IBNDenseUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)

        # The unit contributes `inc_channels` new feature maps, produced
        # through a bottleneck that is `bn_size` times wider.
        bn_size = 4
        inc_channels = out_channels - in_channels
        mid_channels = bn_size * inc_channels

        with self.name_scope():
            self.conv1 = ibn_pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_ibn=conv1_ibn,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)

    def hybrid_forward(self, F, x):
        new_features = self.conv2(self.conv1(x))
        if self.use_dropout:
            new_features = self.dropout(new_features)
        # Dense connectivity: stack the new features onto the input.
        return F.concat(x, new_features, dim=1)
class IBNDenseNet(HybridBlock):
    """
    IBN-DenseNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(IBNDenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    # Every stage after the first begins with a transition
                    # block that halves the channel count (and downsamples).
                    if i != 0:
                        stage.add(TransitionBlock(
                            in_channels=in_channels,
                            out_channels=(in_channels // 2),
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = in_channels // 2
                    for j, out_channels in enumerate(channels_per_stage):
                        # IBN is applied only in every third unit of the first
                        # three stages.
                        conv1_ibn = (i < 3) and (j % 3 == 0)
                        stage.add(IBNDenseUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            bn_use_global_stats=bn_use_global_stats,
                            dropout_rate=dropout_rate,
                            conv1_ibn=conv1_ibn))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 7x7 global average pooling for 224x224 inputs.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_ibndensenet(num_layers,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create IBN-DenseNet model with specific parameters.
Parameters:
----------
num_layers : int
Number of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
if num_layers == 121:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 24, 16]
elif num_layers == 161:
init_block_channels = 96
growth_rate = 48
layers = [6, 12, 36, 24]
elif num_layers == 169:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 32, 32]
elif num_layers == 201:
init_block_channels = 64
growth_rate = 32
layers = [6, 12, 48, 32]
else:
raise ValueError("Unsupported IBN-DenseNet version with number of layers {}".format(num_layers))
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
net = IBNDenseNet(
channels=channels,
init_block_channels=init_block_channels,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def ibn_densenet121(**kwargs):
    """
    IBN-DenseNet-121 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 121-layer configuration under its canonical pretrained-weight name.
    return get_ibndensenet(model_name="ibn_densenet121", num_layers=121, **kwargs)
def ibn_densenet161(**kwargs):
    """
    IBN-DenseNet-161 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 161-layer configuration under its canonical pretrained-weight name.
    return get_ibndensenet(model_name="ibn_densenet161", num_layers=161, **kwargs)
def ibn_densenet169(**kwargs):
    """
    IBN-DenseNet-169 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 169-layer configuration under its canonical pretrained-weight name.
    return get_ibndensenet(model_name="ibn_densenet169", num_layers=169, **kwargs)
def ibn_densenet201(**kwargs):
    """
    IBN-DenseNet-201 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 201-layer configuration under its canonical pretrained-weight name.
    return get_ibndensenet(model_name="ibn_densenet201", num_layers=201, **kwargs)
def _test():
    """Smoke test: build each IBN-DenseNet variant, count weights, check output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        ibn_densenet121,
        ibn_densenet161,
        ibn_densenet169,
        ibn_densenet201,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count trainable scalar weights (skip shape-less / frozen params).
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Reference parameter counts for each variant.
        assert (model != ibn_densenet121 or weight_count == 7978856)
        assert (model != ibn_densenet161 or weight_count == 28681000)
        assert (model != ibn_densenet169 or weight_count == 14149480)
        assert (model != ibn_densenet201 or weight_count == 20013928)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 14,757 | 32.848624 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/hardnet.py | """
HarDNet for ImageNet-1K, implemented in Gluon.
Original paper: 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.
"""
__all__ = ['HarDNet', 'hardnet39ds', 'hardnet68ds', 'hardnet68', 'hardnet85']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv_block
class InvDwsConvBlock(HybridBlock):
    """
    Inverse depthwise separable convolution block with BatchNorms and activations at each convolution layer.
    Unlike the usual depthwise-separable order, the pointwise convolution runs
    first and the depthwise convolution second.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the pointwise convolution block.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the depthwise convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 pw_activation=(lambda: nn.Activation("relu")),
                 dw_activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(InvDwsConvBlock, self).__init__(**kwargs)
        with self.name_scope():
            # 1x1 pointwise stage changes the channel count.
            self.pw_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=pw_activation)
            # Depthwise stage applies the spatial kernel per channel.
            self.dw_conv = dwconv_block(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=dw_activation)

    def hybrid_forward(self, F, x):
        return self.dw_conv(self.pw_conv(x))
def invdwsconv3x3_block(in_channels,
                        out_channels,
                        strides=1,
                        padding=1,
                        dilation=1,
                        use_bias=False,
                        bn_epsilon=1e-5,
                        bn_use_global_stats=False,
                        pw_activation=(lambda: nn.Activation("relu")),
                        dw_activation=(lambda: nn.Activation("relu")),
                        **kwargs):
    """
    3x3 inverse depthwise separable version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the pointwise convolution block.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the depthwise convolution block.
    """
    # Fix the spatial kernel to 3x3; everything else passes through.
    return InvDwsConvBlock(
        kernel_size=3,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        pw_activation=pw_activation,
        dw_activation=dw_activation,
        **kwargs)
class HarDUnit(HybridBlock):
    """
    HarDNet unit.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels for each block.
    out_channels_list : list of int
        Number of output channels for each block.
    links_list : list of list of int
        List of indices for each layer.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_dropout : bool
        Whether to use dropout module.
    downsampling : bool
        Whether to downsample input.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 links_list,
                 use_deptwise,
                 use_dropout,
                 downsampling,
                 bn_use_global_stats,
                 activation,
                 **kwargs):
        super(HarDUnit, self).__init__(**kwargs)
        self.links_list = links_list
        self.use_dropout = use_dropout
        self.downsampling = downsampling

        with self.name_scope():
            # One conv block per entry in links_list.
            self.blocks = nn.HybridSequential(prefix="")
            for i in range(len(links_list)):
                in_channels = in_channels_list[i]
                out_channels = out_channels_list[i]
                if use_deptwise:
                    unit = invdwsconv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        bn_use_global_stats=bn_use_global_stats,
                        pw_activation=activation,
                        dw_activation=None)
                else:
                    unit = conv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        bn_use_global_stats=bn_use_global_stats)
                self.blocks.add(unit)

            if self.use_dropout:
                self.dropout = nn.Dropout(rate=0.1)
            # Final 1x1 conv fuses the concatenated selected outputs.
            self.conv = conv1x1_block(
                in_channels=in_channels_list[-1],
                out_channels=out_channels_list[-1],
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)

            if self.downsampling:
                if use_deptwise:
                    self.downsample = dwconv3x3_block(
                        in_channels=out_channels_list[-1],
                        out_channels=out_channels_list[-1],
                        strides=2,
                        bn_use_global_stats=bn_use_global_stats,
                        activation=None)
                else:
                    self.downsample = nn.MaxPool2D(
                        pool_size=2,
                        strides=2)

    def hybrid_forward(self, F, x):
        # Harmonic dense connectivity: each layer concatenates the outputs of
        # the earlier layers listed in its link set (index 0 is the unit input).
        layer_outs = [x]
        for links_i, layer_i in zip(self.links_list, self.blocks._children.values()):
            layer_in = []
            for idx_ij in links_i:
                layer_in.append(layer_outs[idx_ij])
            if len(layer_in) > 1:
                x = F.concat(*layer_in, dim=1)
            else:
                x = layer_in[0]
            out = layer_i(x)
            layer_outs.append(out)

        # Keep the last layer's output plus every odd-indexed intermediate one.
        outs = []
        for i, layer_out_i in enumerate(layer_outs):
            if (i == len(layer_outs) - 1) or (i % 2 == 1):
                outs.append(layer_out_i)
        x = F.concat(*outs, dim=1)

        if self.use_dropout:
            x = self.dropout(x)
        x = self.conv(x)

        if self.downsampling:
            x = self.downsample(x)
        return x
class HarDInitBlock(HybridBlock):
    """
    HarDNet specific initial block: two convolutions followed by a 4x total
    spatial downscale (stride-2 conv1 plus a stride-2 downsample stage).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_deptwise,
                 bn_use_global_stats,
                 activation,
                 **kwargs):
        super(HarDInitBlock, self).__init__(**kwargs)
        mid_channels = out_channels // 2
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            # Depthwise-separable variant expands with a 1x1 conv; the plain
            # variant uses a 3x3 conv.
            conv2_block_class = conv1x1_block if use_deptwise else conv3x3_block
            self.conv2 = conv2_block_class(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            if use_deptwise:
                # Strided depthwise conv replaces max-pooling for downsampling.
                self.downsample = dwconv3x3_block(
                    in_channels=out_channels,
                    out_channels=out_channels,
                    strides=2,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            else:
                self.downsample = nn.MaxPool2D(
                    pool_size=3,
                    strides=2,
                    padding=1)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.downsample(x)
        return x
class HarDNet(HybridBlock):
    """
    HarDNet model from 'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    init_block_channels : int
        Number of output channels for the initial unit.
    unit_in_channels : list of list of list of int
        Number of input channels for each layer in each stage.
    unit_out_channels : list of list of list of int
        Number of output channels for each layer in each stage.
    unit_links : list of list of list of int
        List of indices for each layer in each stage.
    use_deptwise : bool
        Whether to use depthwise downsampling.
    use_last_dropout : bool
        Whether to use dropouts in the last unit.
    output_dropout_rate : float
        Parameter of Dropout layer before classifier. Faction of the input units to drop.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 init_block_channels,
                 unit_in_channels,
                 unit_out_channels,
                 unit_links,
                 use_deptwise,
                 use_last_dropout,
                 output_dropout_rate,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(HarDNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        activation = "relu6"
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(HarDInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                use_deptwise=use_deptwise,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation))
            for i, (in_channels_list_i, out_channels_list_i) in enumerate(zip(unit_in_channels, unit_out_channels)):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, (in_channels_list_ij, out_channels_list_ij) in enumerate(zip(in_channels_list_i,
                                                                                        out_channels_list_i)):
                        # Dropout only in the very last unit of the very last stage;
                        # downsample at the end of every stage except the last.
                        use_dropout = ((j == len(in_channels_list_i) - 1) and (i == len(unit_in_channels) - 1) and
                                       use_last_dropout)
                        downsampling = ((j == len(in_channels_list_i) - 1) and (i != len(unit_in_channels) - 1))
                        stage.add(HarDUnit(
                            in_channels_list=in_channels_list_ij,
                            out_channels_list=out_channels_list_ij,
                            links_list=unit_links[i][j],
                            use_deptwise=use_deptwise,
                            use_dropout=use_dropout,
                            downsampling=downsampling,
                            bn_use_global_stats=bn_use_global_stats,
                            activation=activation))
                self.features.add(stage)
            # Final feature width = output channels of the last layer of the last unit.
            in_channels = unit_out_channels[-1][-1][-1]
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dropout(rate=output_dropout_rate))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_hardnet(blocks,
                use_deptwise=True,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create HarDNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_deptwise : bool, default True
        Whether to use depthwise separable version of the model.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        The constructed HarDNet network.
    """
    # Architecture hyper-parameters per published variant.
    if blocks == 39:
        init_block_channels = 48
        growth_factor = 1.6
        dropout_rate = 0.05 if use_deptwise else 0.1
        layers = [4, 16, 8, 4]
        channels_per_layers = [96, 320, 640, 1024]
        growth_rates = [16, 20, 64, 160]
        downsamples = [1, 1, 1, 0]
        use_dropout = False
    elif blocks == 68:
        init_block_channels = 64
        growth_factor = 1.7
        dropout_rate = 0.05 if use_deptwise else 0.1
        layers = [8, 16, 16, 16, 4]
        channels_per_layers = [128, 256, 320, 640, 1024]
        growth_rates = [14, 16, 20, 40, 160]
        downsamples = [1, 0, 1, 1, 0]
        use_dropout = False
    elif blocks == 85:
        init_block_channels = 96
        growth_factor = 1.7
        dropout_rate = 0.05 if use_deptwise else 0.2
        layers = [8, 16, 16, 16, 16, 4]
        channels_per_layers = [192, 256, 320, 480, 720, 1280]
        growth_rates = [24, 24, 28, 36, 48, 256]
        downsamples = [1, 0, 1, 0, 1, 0]
        use_dropout = True
    else:
        raise ValueError("Unsupported HarDNet version with number of layers {}".format(blocks))
    # The last unit never downsamples (global pooling follows it).
    assert (downsamples[-1] == 0)
    def calc_stage_params():
        # Derive per-layer channel counts and link indices from the compact config.
        def calc_unit_params():
            def calc_blocks_params(layer_idx,
                                   base_channels,
                                   growth_rate):
                # HarDNet link rule: layer j connects back to j - 2**k for every
                # power of two dividing j; each extra link scales the output
                # width by growth_factor, rounded to an even channel count.
                # NOTE: predecessors are recomputed recursively without
                # memoization; cheap at these layer counts.
                if layer_idx == 0:
                    return base_channels, 0, []
                out_channels_ij = growth_rate
                links_ij = []
                for k in range(10):
                    dv = 2 ** k
                    if layer_idx % dv == 0:
                        t = layer_idx - dv
                        links_ij.append(t)
                        if k > 0:
                            out_channels_ij *= growth_factor
                out_channels_ij = int(int(out_channels_ij + 1) / 2) * 2
                in_channels_ij = 0
                for t in links_ij:
                    out_channels_ik, _, _ = calc_blocks_params(
                        layer_idx=t,
                        base_channels=base_channels,
                        growth_rate=growth_rate)
                    in_channels_ij += out_channels_ik
                return out_channels_ij, in_channels_ij, links_ij
            unit_out_channels = []
            unit_in_channels = []
            unit_links = []
            for num_layers, growth_rate, base_channels, channels_per_layers_i in zip(
                    layers, growth_rates, [init_block_channels] + channels_per_layers[:-1], channels_per_layers):
                stage_out_channels_i = 0
                unit_out_channels_i = []
                unit_in_channels_i = []
                unit_links_i = []
                for j in range(num_layers):
                    out_channels_ij, in_channels_ij, links_ij = calc_blocks_params(
                        layer_idx=(j + 1),
                        base_channels=base_channels,
                        growth_rate=growth_rate)
                    unit_out_channels_i.append(out_channels_ij)
                    unit_in_channels_i.append(in_channels_ij)
                    unit_links_i.append(links_ij)
                    # Only even-indexed and last layers feed the fusion conv
                    # (mirrors the selection in HarDUnit.hybrid_forward).
                    if (j % 2 == 0) or (j == num_layers - 1):
                        stage_out_channels_i += out_channels_ij
                # Trailing entries describe the unit's 1x1 fusion conv.
                unit_in_channels_i.append(stage_out_channels_i)
                unit_out_channels_i.append(channels_per_layers_i)
                unit_out_channels.append(unit_out_channels_i)
                unit_in_channels.append(unit_in_channels_i)
                unit_links.append(unit_links_i)
            return unit_out_channels, unit_in_channels, unit_links
        unit_out_channels, unit_in_channels, unit_links = calc_unit_params()
        # Group consecutive units into stages, closing a stage at each downsample
        # flag (and at the end of the network).
        stage_out_channels = []
        stage_in_channels = []
        stage_links = []
        stage_out_channels_k = None
        for i in range(len(layers)):
            if stage_out_channels_k is None:
                stage_out_channels_k = []
                stage_in_channels_k = []
                stage_links_k = []
            stage_out_channels_k.append(unit_out_channels[i])
            stage_in_channels_k.append(unit_in_channels[i])
            stage_links_k.append(unit_links[i])
            if (downsamples[i] == 1) or (i == len(layers) - 1):
                stage_out_channels.append(stage_out_channels_k)
                stage_in_channels.append(stage_in_channels_k)
                stage_links.append(stage_links_k)
                stage_out_channels_k = None
        return stage_out_channels, stage_in_channels, stage_links
    stage_out_channels, stage_in_channels, stage_links = calc_stage_params()
    net = HarDNet(
        init_block_channels=init_block_channels,
        unit_in_channels=stage_in_channels,
        unit_out_channels=stage_out_channels,
        unit_links=stage_links,
        use_deptwise=use_deptwise,
        use_last_dropout=use_dropout,
        output_dropout_rate=dropout_rate,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def hardnet39ds(**kwargs):
    """
    Build the depthwise-separable HarDNet-39DS classifier from
    'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which pretrained weights are loaded.
    root : str, default '~/.mxnet/models'
        Directory that caches model parameters.
    """
    return get_hardnet(model_name="hardnet39ds", blocks=39, use_deptwise=True, **kwargs)
def hardnet68ds(**kwargs):
    """
    Build the depthwise-separable HarDNet-68DS classifier from
    'HarDNet: A Low Memory Traffic Network,' https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which pretrained weights are loaded.
    root : str, default '~/.mxnet/models'
        Directory that caches model parameters.
    """
    return get_hardnet(model_name="hardnet68ds", blocks=68, use_deptwise=True, **kwargs)
def hardnet68(**kwargs):
    """
    Build the HarDNet-68 classifier from 'HarDNet: A Low Memory Traffic Network,'
    https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which pretrained weights are loaded.
    root : str, default '~/.mxnet/models'
        Directory that caches model parameters.
    """
    return get_hardnet(model_name="hardnet68", blocks=68, use_deptwise=False, **kwargs)
def hardnet85(**kwargs):
    """
    Build the HarDNet-85 classifier from 'HarDNet: A Low Memory Traffic Network,'
    https://arxiv.org/abs/1909.00948.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which pretrained weights are loaded.
    root : str, default '~/.mxnet/models'
        Directory that caches model parameters.
    """
    return get_hardnet(model_name="hardnet85", blocks=85, use_deptwise=False, **kwargs)
def _test():
    """Smoke-test every HarDNet variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter counts per model builder.
    expected_params = {
        hardnet39ds: 3488228,
        hardnet68ds: 4180602,
        hardnet68: 17565348,
        hardnet85: 36670212,
    }
    for model, expected in expected_params.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when executed as a script.
if __name__ == "__main__":
    _test()
"""
SINet for image segmentation, implemented in Gluon.
Original paper: 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.
"""
__all__ = ['SINet', 'sinet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import PReLU2, conv1x1, get_activation_layer, conv1x1_block, conv3x3_block, round_channels, dwconv_block,\
InterpolationBlock, ChannelShuffle
class SEBlock(HybridBlock):
    """
    SINet version of Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    mid_activation : function or str, default nn.Activation('relu')
        Activation function after the first dense layer (only used when reduction > 1).
    out_activation : function or str, default nn.Activation('sigmoid')
        Activation function after the last dense layer.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 round_mid=False,
                 mid_activation=(lambda: nn.Activation("relu")),
                 out_activation=(lambda: nn.Activation("sigmoid")),
                 **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        # With reduction == 1 the mid activation and second FC are skipped at
        # forward time (fc2 is still registered but unused in that case).
        self.use_conv2 = (reduction > 1)
        mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        with self.name_scope():
            self.fc1 = nn.Dense(
                in_units=channels,
                units=mid_channels)
            if self.use_conv2:
                self.activ = get_activation_layer(mid_activation)
            self.fc2 = nn.Dense(
                in_units=mid_channels,
                units=channels)
            self.sigmoid = get_activation_layer(out_activation)
    def hybrid_forward(self, F, x):
        # Squeeze: global average pooling to a per-channel descriptor.
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        w = F.Flatten(w)
        w = self.fc1(w)
        if self.use_conv2:
            w = self.activ(w)
            w = self.fc2(w)
        w = self.sigmoid(w)
        # Excite: broadcast the channel weights back to NCHW and rescale.
        w = w.expand_dims(2).expand_dims(3).broadcast_like(x)
        x = x * w
        return x
class DwsConvBlock(HybridBlock):
    """
    SINet version of depthwise separable convolution block with BatchNorms and activations at each convolution
    layers, plus an optional squeeze-and-excitation step between them.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the pointwise convolution block.
    se_reduction : int, default 0
        Squeeze reduction value (0 means no-se).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_epsilon=1e-5,
                 dw_activation=(lambda: nn.Activation("relu")),
                 pw_activation=(lambda: nn.Activation("relu")),
                 se_reduction=0,
                 **kwargs):
        super(DwsConvBlock, self).__init__(**kwargs)
        self.use_se = (se_reduction > 0)
        with self.name_scope():
            self.dw_conv = dwconv_block(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                use_bias=use_bias,
                use_bn=dw_use_bn,
                bn_epsilon=bn_epsilon,
                activation=dw_activation)
            if self.use_se:
                # SE applied between depthwise and pointwise parts, with PReLU
                # activations instead of the default ReLU/sigmoid pair.
                self.se = SEBlock(
                    channels=in_channels,
                    reduction=se_reduction,
                    round_mid=False,
                    mid_activation=(lambda: PReLU2(in_channels // se_reduction)),
                    out_activation=(lambda: PReLU2(in_channels)))
            self.pw_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=pw_use_bn,
                bn_epsilon=bn_epsilon,
                activation=pw_activation)
    def hybrid_forward(self, F, x):
        x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x)
        return x
def dwsconv3x3_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=1,
                     dilation=1,
                     use_bias=False,
                     dw_use_bn=True,
                     pw_use_bn=True,
                     bn_epsilon=1e-5,
                     dw_activation=(lambda: nn.Activation("relu")),
                     pw_activation=(lambda: nn.Activation("relu")),
                     se_reduction=0,
                     **kwargs):
    """
    Convenience factory for a 3x3 depthwise-separable convolution block (SINet version).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm in the depthwise part.
    pw_use_bn : bool, default True
        Whether to use BatchNorm in the pointwise part.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation after the depthwise convolution.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation after the pointwise convolution.
    se_reduction : int, default 0
        Squeeze reduction value (0 means no-se).
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    kwargs.update(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        dw_use_bn=dw_use_bn,
        pw_use_bn=pw_use_bn,
        bn_epsilon=bn_epsilon,
        dw_activation=dw_activation,
        pw_activation=pw_activation,
        se_reduction=se_reduction)
    return DwsConvBlock(**kwargs)
def dwconv3x3_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    bn_epsilon=1e-5,
                    activation=(lambda: nn.Activation("relu")),
                    **kwargs):
    """
    Convenience factory for a 3x3 depthwise convolution block (SINet version).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    kwargs.update(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        activation=activation)
    return dwconv_block(**kwargs)
class FDWConvBlock(HybridBlock):
    """
    Factorized depthwise separable convolution block: a kxk depthwise conv is
    approximated as the sum of a (k x 1) and a (1 x k) depthwise conv, each
    with its own BatchNorm, followed by a shared activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer (must be True; see assert below).
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
        Activation function after the each convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(FDWConvBlock, self).__init__(**kwargs)
        # The block is only defined for the BN-enabled configuration.
        assert use_bn
        self.activate = (activation is not None)
        with self.name_scope():
            # Vertical branch: (kernel_size x 1) depthwise conv, no activation.
            self.v_conv = dwconv_block(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(kernel_size, 1),
                strides=strides,
                padding=(padding, 0),
                dilation=dilation,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_epsilon=bn_epsilon,
                activation=None)
            # Horizontal branch: (1 x kernel_size) depthwise conv, no activation.
            self.h_conv = dwconv_block(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(1, kernel_size),
                strides=strides,
                padding=(0, padding),
                dilation=dilation,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_epsilon=bn_epsilon,
                activation=None)
            if self.activate:
                self.act = get_activation_layer(activation)
    def hybrid_forward(self, F, x):
        # Sum the two factorized branches, then apply the shared activation.
        x = self.v_conv(x) + self.h_conv(x)
        if self.activate:
            x = self.act(x)
        return x
def fdwconv3x3_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=1,
                     dilation=1,
                     use_bias=False,
                     use_bn=True,
                     bn_epsilon=1e-5,
                     activation=(lambda: nn.Activation("relu")),
                     **kwargs):
    """
    Convenience factory for a 3x3 factorized depthwise convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    kwargs.update(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        activation=activation)
    return FDWConvBlock(**kwargs)
def fdwconv5x5_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=2,
                     dilation=1,
                     use_bias=False,
                     use_bn=True,
                     bn_epsilon=1e-5,
                     activation=(lambda: nn.Activation("relu")),
                     **kwargs):
    """
    Convenience factory for a 5x5 factorized depthwise convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    # Fix the kernel size at 5 and forward everything else unchanged.
    kwargs.update(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        activation=activation)
    return FDWConvBlock(**kwargs)
class SBBlock(HybridBlock):
    """
    SB-block (spatial squeeze block): optionally average-pool down by
    `scale_factor`, apply a (factorized) depthwise conv plus a 1x1 conv,
    upsample back, and finish with BatchNorm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size for a factorized depthwise separable convolution block.
    scale_factor : int
        Scale factor.
    size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 scale_factor,
                 size,
                 bn_epsilon,
                 **kwargs):
        super(SBBlock, self).__init__(**kwargs)
        self.use_scale = (scale_factor > 1)
        with self.name_scope():
            if self.use_scale:
                self.down_scale = nn.AvgPool2D(
                    pool_size=scale_factor,
                    strides=scale_factor)
                self.up_scale = InterpolationBlock(
                    scale_factor=scale_factor,
                    out_size=size)
            # NOTE(review): this is always True for positive scale factors, so the
            # dwconv3x3_block branch below is dead; possibly it was meant to depend
            # on kernel_size — confirm against the reference implementation.
            use_fdw = (scale_factor > 0)
            if use_fdw:
                # Pick the factorized conv matching the requested kernel size.
                fdwconv3x3_class = fdwconv3x3_block if kernel_size == 3 else fdwconv5x5_block
                self.conv1 = fdwconv3x3_class(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    bn_epsilon=bn_epsilon,
                    activation=(lambda: PReLU2(in_channels)))
            else:
                self.conv1 = dwconv3x3_block(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    bn_epsilon=bn_epsilon,
                    activation=(lambda: PReLU2(in_channels)))
            self.conv2 = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels)
            # Final BN is applied after upsampling, on the full-resolution map.
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                epsilon=bn_epsilon)
    def hybrid_forward(self, F, x):
        if self.use_scale:
            x = self.down_scale(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_scale:
            x = self.up_scale(x)
        x = self.bn(x)
        return x
class PreActivation(HybridBlock):
    """
    PreResNet like pure pre-activation block without convolution layer:
    BatchNorm followed by PReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 bn_epsilon=1e-5,
                 **kwargs):
        super(PreActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                epsilon=bn_epsilon)
            self.activ = PReLU2(in_channels)
    def hybrid_forward(self, F, x):
        x = self.bn(x)
        x = self.activ(x)
        return x
class ESPBlock(HybridBlock):
    """
    ESP block, which is based on the following principle: Reduce ---> Split ---> Transform --> Merge.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_sizes : list of int
        Convolution window size for branches.
    scale_factors : list of int
        Scale factor for branches.
    use_residual : bool
        Whether to use residual connection.
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_sizes,
                 scale_factors,
                 use_residual,
                 in_size,
                 bn_epsilon,
                 **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        self.use_residual = use_residual
        # One SB branch per kernel size; each produces out_channels // groups
        # channels, with the division remainder added to the first branch.
        groups = len(kernel_sizes)
        mid_channels = int(out_channels / groups)
        res_channels = out_channels - groups * mid_channels
        with self.name_scope():
            # Reduce: grouped 1x1 conv, followed by a channel shuffle so the
            # branches see information from every group.
            self.conv = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=groups)
            self.c_shuffle = ChannelShuffle(
                channels=mid_channels,
                groups=groups)
            # Transform: parallel SB branches concatenated along channels.
            self.branches = HybridConcurrent(axis=1, prefix="")
            with self.branches.name_scope():
                for i in range(groups):
                    out_channels_i = (mid_channels + res_channels) if i == 0 else mid_channels
                    self.branches.add(SBBlock(
                        in_channels=mid_channels,
                        out_channels=out_channels_i,
                        kernel_size=kernel_sizes[i],
                        scale_factor=scale_factors[i],
                        size=in_size,
                        bn_epsilon=bn_epsilon))
            self.preactiv = PreActivation(
                in_channels=out_channels,
                bn_epsilon=bn_epsilon)
    def hybrid_forward(self, F, x):
        if self.use_residual:
            identity = x
        x = self.conv(x)
        x = self.c_shuffle(x)
        x = self.branches(x)
        # Merge: optional residual add, then BN + PReLU pre-activation.
        if self.use_residual:
            x = identity + x
        x = self.preactiv(x)
        return x
class SBStage(HybridBlock):
    """
    SB stage: a stride-2 depthwise-separable downscale conv followed by a chain
    of ESP blocks; the stage output concatenates the downscaled input with the
    chain output (dense shortcut) before a BN+PReLU pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    down_channels : int
        Number of output channels for a downscale block.
    channels_list : list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of int
        Convolution window size for branches.
    scale_factors_list : list of int
        Scale factor for branches.
    use_residual_list : list of int
        List of flags for using residual in each ESP-block.
    se_reduction : int
        Squeeze reduction value (0 means no-se).
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 down_channels,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 se_reduction,
                 in_size,
                 bn_epsilon,
                 **kwargs):
        super(SBStage, self).__init__(**kwargs)
        with self.name_scope():
            self.down_conv = dwsconv3x3_block(
                in_channels=in_channels,
                out_channels=down_channels,
                strides=2,
                dw_use_bn=False,
                bn_epsilon=bn_epsilon,
                dw_activation=None,
                pw_activation=(lambda: PReLU2(down_channels)),
                se_reduction=se_reduction)
            in_channels = down_channels
            self.main_branch = nn.HybridSequential(prefix="")
            with self.main_branch.name_scope():
                for i, out_channels in enumerate(channels_list):
                    use_residual = (use_residual_list[i] == 1)
                    kernel_sizes = kernel_sizes_list[i]
                    scale_factors = scale_factors_list[i]
                    self.main_branch.add(ESPBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_sizes=kernel_sizes,
                        scale_factors=scale_factors,
                        use_residual=use_residual,
                        # ESP blocks operate at half the stage's input resolution.
                        in_size=((in_size[0] // 2, in_size[1] // 2) if in_size else None),
                        bn_epsilon=bn_epsilon))
                    in_channels = out_channels
            self.preactiv = PreActivation(
                in_channels=(down_channels + in_channels),
                bn_epsilon=bn_epsilon)
    def hybrid_forward(self, F, x):
        # Returns a pair: (fused stage output, raw main-branch output). The
        # second element is used as a skip connection by the decoder.
        x = self.down_conv(x)
        y = self.main_branch(x)
        x = F.concat(x, y, dim=1)
        x = self.preactiv(x)
        return x, y
class SBEncoderInitBlock(HybridBlock):
    """
    SB encoder specific initial block: two stride-2 convolutions (plain 3x3,
    then depthwise-separable with SE), giving a 4x spatial downscale.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 bn_epsilon,
                 **kwargs):
        super(SBEncoderInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_epsilon=bn_epsilon,
                activation=(lambda: PReLU2(mid_channels)))
            self.conv2 = dwsconv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=2,
                dw_use_bn=False,
                bn_epsilon=bn_epsilon,
                dw_activation=None,
                pw_activation=(lambda: PReLU2(out_channels)),
                se_reduction=1)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class SBEncoder(HybridBlock):
    """
    SB encoder for SINet: initial block plus two SB stages, ending in a 1x1
    projection to the requested number of output channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    init_block_channels : list of int
        Number of output channels for convolutions in the initial block.
    down_channels_list : list of int
        Number of downsample channels for each residual block.
    channels_list : list of list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of list of int
        Convolution window size for each residual block.
    scale_factors_list : list of list of int
        Scale factor for each residual block.
    use_residual_list : list of list of int
        List of flags for using residual in each residual block.
    in_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 init_block_channels,
                 down_channels_list,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 in_size,
                 bn_epsilon,
                 **kwargs):
        super(SBEncoder, self).__init__(**kwargs)
        with self.name_scope():
            self.init_block = SBEncoderInitBlock(
                in_channels=in_channels,
                mid_channels=init_block_channels[0],
                out_channels=init_block_channels[1],
                bn_epsilon=bn_epsilon)
            in_channels = init_block_channels[1]
            # Stage 1 operates at 1/4 input resolution.
            self.stage1 = SBStage(
                in_channels=in_channels,
                down_channels=down_channels_list[0],
                channels_list=channels_list[0],
                kernel_sizes_list=kernel_sizes_list[0],
                scale_factors_list=scale_factors_list[0],
                use_residual_list=use_residual_list[0],
                se_reduction=1,
                in_size=((in_size[0] // 4, in_size[1] // 4) if in_size else None),
                bn_epsilon=bn_epsilon)
            # Each stage's fused output width = downscale channels + last ESP width.
            in_channels = down_channels_list[0] + channels_list[0][-1]
            # Stage 2 operates at 1/8 input resolution.
            self.stage2 = SBStage(
                in_channels=in_channels,
                down_channels=down_channels_list[1],
                channels_list=channels_list[1],
                kernel_sizes_list=kernel_sizes_list[1],
                scale_factors_list=scale_factors_list[1],
                use_residual_list=use_residual_list[1],
                se_reduction=2,
                in_size=((in_size[0] // 8, in_size[1] // 8) if in_size else None),
                bn_epsilon=bn_epsilon)
            in_channels = down_channels_list[1] + channels_list[1][-1]
            self.output = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels)
    def hybrid_forward(self, F, x):
        # Returns (final features, stage-1 skip, init-block skip) for the decoder.
        y1 = self.init_block(x)
        x, y2 = self.stage1(y1)
        x, _ = self.stage2(x)
        x = self.output(x)
        return x, y2, y1
class SBDecodeBlock(HybridBlock):
    """
    SB decoder block for SINet.

    Upsamples a coarse class-score map and fuses it with a finer encoder feature
    map using the "information blocking" scheme: pixels where the coarse
    prediction is already confident receive less information from the skip path.

    Parameters:
    ----------
    channels : int
        Number of output classes.
    out_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 channels,
                 out_size,
                 bn_epsilon,
                 **kwargs):
        super(SBDecodeBlock, self).__init__(**kwargs)
        with self.name_scope():
            # 2x upsampling of the incoming (coarser) score map.
            self.up = InterpolationBlock(
                scale_factor=2,
                out_size=out_size)
            self.bn = nn.BatchNorm(
                in_channels=channels,
                epsilon=bn_epsilon)
    def hybrid_forward(self, F, x, y):
        """
        Fuse upsampled coarse scores `x` with skip features `y` (same shape).
        """
        x = self.up(x)
        x = self.bn(x)
        # Per-pixel class probabilities of the current prediction.
        w_conf = x.softmax()
        # Maximum class probability per pixel, broadcast over the channel axis.
        w_max = w_conf.max(axis=1).expand_dims(1).broadcast_like(x)
        # Information blocking: the skip contribution is scaled by (1 - confidence).
        x = y * (1 - w_max) + x
        return x
class SBDecoder(HybridBlock):
    """
    SB decoder for SINet.

    Consumes the three encoder outputs (coarsest to finest: y3, y2, y1) and
    produces a full-resolution segmentation map via two information-blocking
    decode steps followed by a learned 2x transposed-conv upsampling and a
    final 2x interpolation.

    Parameters:
    ----------
    dim2 : int
        Size of dimension #2.
    classes : int
        Number of segmentation classes.
    out_size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    """
    def __init__(self,
                 dim2,
                 classes,
                 out_size,
                 bn_epsilon,
                 **kwargs):
        super(SBDecoder, self).__init__(**kwargs)
        with self.name_scope():
            # Fuses y3 (1/16 res, presumably) up to 1/8 of the output size.
            self.decode1 = SBDecodeBlock(
                channels=classes,
                out_size=((out_size[0] // 8, out_size[1] // 8) if out_size else None),
                bn_epsilon=bn_epsilon)
            # Fuses the result up to 1/4 of the output size.
            self.decode2 = SBDecodeBlock(
                channels=classes,
                out_size=((out_size[0] // 4, out_size[1] // 4) if out_size else None),
                bn_epsilon=bn_epsilon)
            # Projects the dim2-channel skip feature y2 down to class scores so it
            # can be fused with the class-score stream.
            self.conv3c = conv1x1_block(
                in_channels=dim2,
                out_channels=classes,
                bn_epsilon=bn_epsilon,
                activation=(lambda: PReLU2(classes)))
            # Learned 2x upsampling (1/4 -> 1/2 resolution).
            self.output = nn.Conv2DTranspose(
                channels=classes,
                kernel_size=2,
                strides=2,
                padding=0,
                output_padding=0,
                in_channels=classes,
                use_bias=False)
            # Final 2x interpolation to the full output size.
            self.up = InterpolationBlock(
                scale_factor=2,
                out_size=out_size)
    def hybrid_forward(self, F, y3, y2, y1):
        """
        Decode encoder outputs y3 (coarsest), y2, y1 (finest) into a
        full-resolution score map.
        """
        y2 = self.conv3c(y2)
        x = self.decode1(y3, y2)
        x = self.decode2(x, y1)
        x = self.output(x)
        x = self.up(x)
        return x
class SINet(HybridBlock):
    """
    SINet model from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial Squeeze Modules and
    Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.

    Parameters:
    ----------
    down_channels_list : list of int
        Number of downsample channels for each residual block.
    channels_list : list of list of int
        Number of output channels for all residual block.
    kernel_sizes_list : list of list of int
        Convolution window size for each residual block.
    scale_factors_list : list of list of int
        Scale factor for each residual block.
    use_residual_list : list of list of int
        List of flags for using residual in each residual block.
    dim2 : int
        Size of dimension #2.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 down_channels_list,
                 channels_list,
                 kernel_sizes_list,
                 scale_factors_list,
                 use_residual_list,
                 dim2,
                 bn_epsilon,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=21,
                 **kwargs):
        super(SINet, self).__init__(**kwargs)
        assert (fixed_size is not None)
        assert (in_channels > 0)
        # Both spatial dimensions must be divisible by 64 so the internal
        # down/upsampling by powers of two lines up exactly.
        assert ((in_size[0] % 64 == 0) and (in_size[1] % 64 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        with self.name_scope():
            # Encoder init block: 3 -> 16 -> `classes` channels; the encoder's
            # final output is already in class-score space.
            init_block_channels = [16, classes]
            out_channels = classes
            self.encoder = SBEncoder(
                in_channels=in_channels,
                out_channels=out_channels,
                init_block_channels=init_block_channels,
                down_channels_list=down_channels_list,
                channels_list=channels_list,
                kernel_sizes_list=kernel_sizes_list,
                scale_factors_list=scale_factors_list,
                use_residual_list=use_residual_list,
                in_size=(in_size if fixed_size else None),
                bn_epsilon=bn_epsilon)
            self.decoder = SBDecoder(
                dim2=dim2,
                classes=classes,
                out_size=(in_size if fixed_size else None),
                bn_epsilon=bn_epsilon)
    def hybrid_forward(self, F, x):
        """
        Return the segmentation map; with ``aux=True`` also return the raw
        (coarse) encoder output as an auxiliary result.
        """
        y3, y2, y1 = self.encoder(x)
        x = self.decoder(y3, y2, y1)
        if self.aux:
            return x, y3
        else:
            return x
def get_sinet(model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create SINet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-unit depthwise kernel sizes for the two encoder stages.
    kernel_sizes_list = [
        [[3, 5], [3, 3], [3, 3]],
        [[3, 5], [3, 3], [5, 5], [3, 5], [3, 5], [3, 5], [3, 3], [5, 5], [3, 5], [3, 5]]]
    # Per-unit squeeze scale factors for the two encoder stages.
    scale_factors_list = [
        [[1, 1], [0, 1], [0, 1]],
        [[1, 1], [0, 1], [1, 4], [2, 8], [1, 1], [1, 1], [0, 1], [1, 8], [2, 4], [0, 2]]]

    # Channel dimensions: dims[0] for stage inputs, dims[1..3] for unit widths.
    chnn = 4
    dims = [24] + [24 * (i + 2) + 4 * (chnn - 1) for i in range(3)]

    num_units1 = len(kernel_sizes_list[0])
    num_units2 = len(kernel_sizes_list[1])
    half2 = num_units2 // 2
    channels_list = [
        [dims[1]] * num_units1,
        [dims[2]] * half2 + [dims[3]] * (num_units2 - half2)]
    # A unit uses a residual connection except at the start of each half-stage.
    use_residual_list = [
        [0] + [1] * (num_units1 - 1),
        [0] + [1] * (half2 - 1) + [0] + [1] * (num_units2 - half2 - 1)]
    down_channels_list = [dims[0], dims[1]]

    net = SINet(
        down_channels_list=down_channels_list,
        channels_list=channels_list,
        kernel_sizes_list=kernel_sizes_list,
        scale_factors_list=scale_factors_list,
        use_residual_list=use_residual_list,
        dim2=dims[1],
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def sinet_cityscapes(classes=19, **kwargs):
    """
    SINet model for Cityscapes from 'SINet: Extreme Lightweight Portrait Segmentation Networks with Spatial
    Squeeze Modules and Information Blocking Decoder,' https://arxiv.org/abs/1911.09099.

    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_sinet(classes=classes, bn_epsilon=1e-3, model_name="sinet_cityscapes", **kwargs)
    return net
def _test():
    """Smoke test: build each model, count parameters and check output shape."""
    import numpy as np
    import mxnet as mx

    in_size = (1024, 2048)
    aux = False
    fixed_size = True
    pretrained = False

    models = [
        sinet_cityscapes,
    ]

    for model in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux, fixed_size=fixed_size)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count trainable parameters only.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != sinet_cityscapes or weight_count == 119418)

        batch = 14
        x = mx.nd.zeros((batch, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape == (batch, 19, in_size[0], in_size[1]))
| 37,954 | 32.888393 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shufflenetv2b.py | """
ShuffleNet V2 for ImageNet-1K, implemented in Gluon. The alternative version.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ChannelShuffle2, SEBlock
class ShuffleUnit(HybridBlock):
    """
    ShuffleNetV2(b) unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    shuffle_group_first : bool
        Whether to use channel shuffle in group first mode.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 shuffle_group_first,
                 **kwargs):
        super(ShuffleUnit, self).__init__(**kwargs)
        self.downsample = downsample
        self.use_se = use_se
        self.use_residual = use_residual
        mid_channels = out_channels // 2
        in_channels2 = in_channels // 2
        assert (in_channels % 2 == 0)
        # Main branch input: with downsampling the whole input goes through the
        # main branch (the shortcut is a parallel transform of the same input);
        # otherwise only the second half after the channel split.
        y2_in_channels = (in_channels if downsample else in_channels2)
        y2_out_channels = out_channels - y2_in_channels
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=y2_in_channels,
                out_channels=mid_channels)
            # Depthwise 3x3; stride 2 performs the spatial downsampling.
            self.dconv = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=(2 if self.downsample else 1),
                activation=None)
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=y2_out_channels)
            if self.use_se:
                self.se = SEBlock(channels=y2_out_channels)
            if downsample:
                # Shortcut branch for downsampling units: depthwise stride-2
                # conv followed by a pointwise conv (no channel split here).
                self.shortcut_dconv = dwconv3x3_block(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    strides=2,
                    activation=None)
                self.shortcut_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=in_channels)
            # The two ChannelShuffle variants differ in group/channel ordering.
            if shuffle_group_first:
                self.c_shuffle = ChannelShuffle(
                    channels=out_channels,
                    groups=2)
            else:
                self.c_shuffle = ChannelShuffle2(
                    channels=out_channels,
                    groups=2)
    def hybrid_forward(self, F, x):
        """
        Split (or shortcut-transform) the input, process the main branch,
        concatenate and channel-shuffle.
        """
        if self.downsample:
            y1 = self.shortcut_dconv(x)
            y1 = self.shortcut_conv(y1)
            x2 = x
        else:
            # Equal channel split: first half is passed through untouched.
            y1, x2 = F.split(x, axis=1, num_outputs=2)
        y2 = self.conv1(x2)
        y2 = self.dconv(y2)
        y2 = self.conv2(y2)
        if self.use_se:
            y2 = self.se(y2)
        # Residual add is only shape-compatible for non-downsampling units.
        if self.use_residual and not self.downsample:
            y2 = y2 + x2
        x = F.concat(y1, y2, dim=1)
        x = self.c_shuffle(x)
        return x
class ShuffleInitBlock(HybridBlock):
    """
    ShuffleNetV2(b) specific initial block: stride-2 3x3 conv followed by a
    stride-2 max pool (4x total spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1,
                ceil_mode=False)
    def hybrid_forward(self, F, x):
        """Apply the stem convolution and pooling."""
        x = self.conv(x)
        x = self.pool(x)
        return x
class ShuffleNetV2b(HybridBlock):
    """
    ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 shuffle_group_first=True,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ShuffleNetV2b, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ShuffleInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # The first unit of every stage downsamples spatially.
                        downsample = (j == 0)
                        stage.add(ShuffleUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            downsample=downsample,
                            use_se=use_se,
                            use_residual=use_residual,
                            shuffle_group_first=shuffle_group_first))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 1x1 expansion before global pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels))
            in_channels = final_block_channels
            # Pool size 7 matches the 7x7 feature map of a 224x224 input.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Compute class logits for a batch of images."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_shufflenetv2b(width_scale,
                      shuffle_group_first=True,
                      model_name=None,
                      pretrained=False,
                      ctx=cpu(),
                      root=os.path.join("~", ".mxnet", "models"),
                      **kwargs):
    """
    Create ShuffleNetV2(b) model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]

    # Repeat each stage width once per unit in that stage.
    channels = [[width] * depth for width, depth in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        channels = [[int(width * width_scale) for width in stage] for stage in channels]
        # Only clearly-wider variants scale the final expansion layer as well.
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2b(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        shuffle_group_first=shuffle_group_first,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def shufflenetv2b_wd2(**kwargs):
    """
    ShuffleNetV2(b) 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 12/29 is the exact width multiplier used for the "0.5x" configuration.
    net = get_shufflenetv2b(width_scale=(12.0 / 29.0), shuffle_group_first=True,
                            model_name="shufflenetv2b_wd2", **kwargs)
    return net
def shufflenetv2b_w1(**kwargs):
    """
    ShuffleNetV2(b) 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenetv2b(width_scale=1.0, shuffle_group_first=True,
                            model_name="shufflenetv2b_w1", **kwargs)
    return net
def shufflenetv2b_w3d2(**kwargs):
    """
    ShuffleNetV2(b) 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 44/29 is the exact width multiplier used for the "1.5x" configuration.
    net = get_shufflenetv2b(width_scale=(44.0 / 29.0), shuffle_group_first=True,
                            model_name="shufflenetv2b_w3d2", **kwargs)
    return net
def shufflenetv2b_w2(**kwargs):
    """
    ShuffleNetV2(b) 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture
    Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 61/29 is the exact width multiplier used for the "2x" configuration.
    net = get_shufflenetv2b(width_scale=(61.0 / 29.0), shuffle_group_first=True,
                            model_name="shufflenetv2b_w2", **kwargs)
    return net
def _test():
    """Smoke test: build each variant, count parameters and check output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        shufflenetv2b_wd2,
        shufflenetv2b_w1,
        shufflenetv2b_w3d2,
        shufflenetv2b_w2,
    ]

    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count trainable parameters only.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != shufflenetv2b_wd2 or weight_count == 1366792)
        assert (model != shufflenetv2b_w1 or weight_count == 2279760)
        assert (model != shufflenetv2b_w3d2 or weight_count == 4410194)
        assert (model != shufflenetv2b_w2 or weight_count == 7611290)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
| 13,269 | 32.00995 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sparsenet.py | """
SparseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.
"""
__all__ = ['SparseNet', 'sparsenet121', 'sparsenet161', 'sparsenet169', 'sparsenet201', 'sparsenet264']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
def sparsenet_exponential_fetch(lst):
    """
    SparseNet's specific exponential fetch: select the elements at offsets
    1, 2, 4, 8, ... from the end of the list (newest first).

    Parameters:
    ----------
    lst : list
        List of something.

    Returns:
    -------
    list
        Filtered list.
    """
    # floor(log2(n)) + 1 == n.bit_length() exactly in integer arithmetic.
    # The previous math.floor(math.log(n, 2)) relied on floating-point log,
    # which can be off by one near large powers of two, and raised ValueError
    # for an empty list (bit_length(0) == 0 yields [] instead).
    return [lst[len(lst) - 2**i] for i in range(len(lst).bit_length())]
class SparseBlock(HybridBlock):
    """
    SparseNet block: a pre-activated 1x1 bottleneck expansion followed by a
    pre-activated 3x3 convolution, with optional dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 **kwargs):
        super(SparseBlock, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        # DenseNet-style bottleneck: expand to 4x the output width first.
        bn_size = 4
        mid_channels = out_channels * bn_size
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x):
        """Apply bottleneck conv, 3x3 conv and optional dropout."""
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x
class SparseStage(HybridBlock):
    """
    SparseNet stage.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels_per_stage : list of int
        Number of output channels for each unit in stage.
    growth_rate : int
        Growth rate for blocks.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    do_transition : bool
        Whether use transition block.
    """
    def __init__(self,
                 in_channels,
                 channels_per_stage,
                 growth_rate,
                 bn_use_global_stats,
                 dropout_rate,
                 do_transition,
                 **kwargs):
        super(SparseStage, self).__init__(**kwargs)
        self.do_transition = do_transition
        with self.name_scope():
            if self.do_transition:
                # DenseNet-style transition: halve the channel count (and
                # spatial size, per TransitionBlock) before the blocks.
                self.trans = TransitionBlock(
                    in_channels=in_channels,
                    out_channels=(in_channels // 2),
                    bn_use_global_stats=bn_use_global_stats)
                in_channels = in_channels // 2
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(channels_per_stage):
                # Each block emits `growth_rate` channels; `channels_per_stage`
                # holds the precomputed concatenated widths its input will have
                # (see get_sparsenet), consumed via `in_channels` below.
                self.blocks.add(SparseBlock(
                    in_channels=in_channels,
                    out_channels=growth_rate,
                    dropout_rate=dropout_rate,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels
    def hybrid_forward(self, F, x):
        """
        Run the blocks with sparse (exponential) aggregation: after each block,
        the next input is the concatenation of the outputs at offsets
        1, 2, 4, ... from the end of the history (stage input included).
        """
        if self.do_transition:
            x = self.trans(x)
        outs = [x]
        for block in self.blocks._children.values():
            y = block(x)
            outs.append(y)
            flt_outs = sparsenet_exponential_fetch(outs)
            x = F.concat(*flt_outs, dim=1)
        return x
class SparseNet(HybridBlock):
    """
    SparseNet model from 'Sparsely Aggregated Convolutional Networks,' https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    growth_rate : int
        Growth rate for blocks.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 growth_rate,
                 bn_use_global_stats=False,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SparseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                # The first stage has no transition block (nothing to shrink).
                stage = SparseStage(
                    in_channels=in_channels,
                    channels_per_stage=channels_per_stage,
                    growth_rate=growth_rate,
                    bn_use_global_stats=bn_use_global_stats,
                    dropout_rate=dropout_rate,
                    do_transition=(i != 0))
                in_channels = channels_per_stage[-1]
                self.features.add(stage)
            # Final pre-activation (BN + ReLU) before pooling.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # Pool size 7 matches the 7x7 feature map of a 224x224 input.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Compute class logits for a batch of images."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_sparsenet(num_layers,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create SparseNet model with specific parameters.

    Parameters:
    ----------
    num_layers : int
        Number of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Stage depths mirror the corresponding DenseNet configurations.
    if num_layers == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif num_layers == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif num_layers == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif num_layers == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    elif num_layers == 264:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 64, 48]
    else:
        raise ValueError("Unsupported SparseNet version with number of layers {}".format(num_layers))
    from functools import reduce
    # Precompute the per-unit concatenated channel widths by simulating
    # SparseStage: the outer reduce walks the stages (each seeded with half of
    # the previous stage's final width, i.e. the transition block); the inner
    # reduce walks the units, where unit j's width is the total width of the
    # exponentially-fetched history [stage input] + (j + 1) growth_rate
    # outputs.  Seeded with [[init_block_channels * 2]]; both trailing [1:]
    # slices drop the seed entries.
    channels = reduce(
        lambda xi, yi: xi + [reduce(
            lambda xj, yj: xj + [sum(sparsenet_exponential_fetch([xj[0]] + [yj[0]] * (yj[1] + 1)))],
            zip([growth_rate] * yi, range(yi)),
            [xi[-1][-1] // 2])[1:]],
        layers,
        [[init_block_channels * 2]])[1:]
    net = SparseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        growth_rate=growth_rate,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def sparsenet121(**kwargs):
    """
    SparseNet-121 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_sparsenet(num_layers=121, model_name="sparsenet121", **kwargs)
    return net
def sparsenet161(**kwargs):
    """
    SparseNet-161 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_sparsenet(num_layers=161, model_name="sparsenet161", **kwargs)
    return net
def sparsenet169(**kwargs):
    """
    SparseNet-169 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_sparsenet(num_layers=169, model_name="sparsenet169", **kwargs)
    return net
def sparsenet201(**kwargs):
    """
    SparseNet-201 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_sparsenet(num_layers=201, model_name="sparsenet201", **kwargs)
    return net
def sparsenet264(**kwargs):
    """
    SparseNet-264 model from 'Sparsely Aggregated Convolutional Networks,'
    https://arxiv.org/abs/1801.05895.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_sparsenet(num_layers=264, model_name="sparsenet264", **kwargs)
    return net
def _test():
    """Smoke test: build each variant, count parameters and check output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        sparsenet121,
        sparsenet161,
        sparsenet169,
        sparsenet201,
        sparsenet264,
    ]

    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count trainable parameters only.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != sparsenet121 or weight_count == 3250824)
        assert (model != sparsenet161 or weight_count == 9853288)
        assert (model != sparsenet169 or weight_count == 4709864)
        assert (model != sparsenet201 or weight_count == 5703144)
        assert (model != sparsenet264 or weight_count == 7717224)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
| 13,347 | 31.635697 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/menet.py | """
MENet for ImageNet-1K, implemented in Gluon.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle
class MEUnit(HybridBlock):
    """
    MENet unit: a ShuffleNet-style residual branch augmented with a narrow
    "fusion" (merge-and-evolve) side branch that gates the depthwise bottleneck.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    side_channels : int
        Number of side channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 side_channels,
                 groups,
                 downsample,
                 ignore_group,
                 **kwargs):
        super(MEUnit, self).__init__(**kwargs)
        self.downsample = downsample
        # Bottleneck width is derived from the full out_channels, before the
        # adjustment below.
        mid_channels = out_channels // 4
        if downsample:
            # When downsampling, the identity is concatenated rather than
            # added, so the residual branch only produces the difference.
            out_channels -= in_channels
        with self.name_scope():
            # residual branch
            self.compress_conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=(1 if ignore_group else groups))
            self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels)
            self.c_shuffle = ChannelShuffle(
                channels=mid_channels,
                groups=groups)
            self.dw_conv2 = depthwise_conv3x3(
                channels=mid_channels,
                strides=(2 if self.downsample else 1))
            self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels)
            self.expand_conv3 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                groups=groups)
            self.expand_bn3 = nn.BatchNorm(in_channels=out_channels)
            if downsample:
                self.avgpool = nn.AvgPool2D(pool_size=3, strides=2, padding=1)
            self.activ = nn.Activation("relu")
            # fusion branch
            # Narrow side path: merge (1x1 down), evolve (3x3, strided in step
            # with the residual branch), then expand back to mid_channels.
            self.s_merge_conv = conv1x1(
                in_channels=mid_channels,
                out_channels=side_channels)
            self.s_merge_bn = nn.BatchNorm(in_channels=side_channels)
            self.s_conv = conv3x3(
                in_channels=side_channels,
                out_channels=side_channels,
                strides=(2 if self.downsample else 1))
            self.s_conv_bn = nn.BatchNorm(in_channels=side_channels)
            self.s_evolve_conv = conv1x1(
                in_channels=side_channels,
                out_channels=mid_channels)
            self.s_evolve_bn = nn.BatchNorm(in_channels=mid_channels)
    def hybrid_forward(self, F, x):
        """
        Apply the residual branch gated by the sigmoid-activated fusion branch,
        then add (or concatenate, when downsampling) the identity.
        """
        identity = x
        # pointwise group convolution 1
        x = self.compress_conv1(x)
        x = self.compress_bn1(x)
        x = self.activ(x)
        x = self.c_shuffle(x)
        # merging
        y = self.s_merge_conv(x)
        y = self.s_merge_bn(y)
        y = self.activ(y)
        # depthwise convolution (bottleneck)
        x = self.dw_conv2(x)
        x = self.dw_bn2(x)
        # evolution
        y = self.s_conv(y)
        y = self.s_conv_bn(y)
        y = self.activ(y)
        y = self.s_evolve_conv(y)
        y = self.s_evolve_bn(y)
        # Sigmoid gate: the fusion branch modulates the bottleneck features.
        y = F.sigmoid(y)
        x = x * y
        # pointwise group convolution 2
        x = self.expand_conv3(x)
        x = self.expand_bn3(x)
        # identity branch
        if self.downsample:
            identity = self.avgpool(identity)
            x = F.concat(x, identity, dim=1)
        else:
            x = x + identity
        x = self.activ(x)
        return x
class MEInitBlock(HybridBlock):
    """
    MENet specific initial block: stride-2 3x3 conv (BN + ReLU) followed by a
    stride-2 max pool (4x total spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(MEInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=3,
                strides=2,
                padding=1,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nn.BatchNorm(in_channels=out_channels)
            self.activ = nn.Activation("relu")
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
    def hybrid_forward(self, F, x):
        """Apply the stem convolution, normalization, activation and pooling."""
        x = self.conv(x)
        x = self.bn(x)
        x = self.activ(x)
        x = self.pool(x)
        return x
class MENet(HybridBlock):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.

    The network is an initial block followed by three stages of ME-units and
    a linear classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 side_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(MENet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(MEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # the first unit of every stage downsamples spatially
                        downsample = (j == 0)
                        # the very first unit of the net disables grouping
                        # (flag consumed by MEUnit)
                        ignore_group = (i == 0) and (j == 0)
                        stage.add(MEUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            side_channels=side_channels,
                            groups=groups,
                            downsample=downsample,
                            ignore_group=ignore_group))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pooling acts as global pooling for 224x224 inputs
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """
        Compute class logits for a batch of images.
        """
        x = self.features(x)
        x = self.output(x)
        return x
def get_menet(first_stage_channels,
              side_channels,
              groups,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create MENet model with specific parameters.

    Parameters:
    ----------
    first_stage_channels : int
        Number of output channels at the first stage.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.

    Raises:
    ------
    ValueError
        If `first_stage_channels` is not a supported configuration, or if
        `pretrained` is set without a valid `model_name`.
    """
    layers = [4, 8, 4]

    # Stem width for each supported first-stage width; stage widths simply
    # double at each of the three stages.
    init_block_channels_map = {
        108: 12,
        128: 12,
        160: 16,
        228: 24,
        256: 24,
        348: 24,
        352: 24,
        456: 48,
    }
    if first_stage_channels not in init_block_channels_map:
        raise ValueError("Unsupported `first_stage_channels` value: {}".format(first_stage_channels))
    init_block_channels = init_block_channels_map[first_stage_channels]
    channels_per_layers = [first_stage_channels * (2 ** i) for i in range(3)]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = MENet(
        channels=channels,
        init_block_channels=init_block_channels,
        side_channels=side_channels,
        groups=groups,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def menet108_8x1_g3(**kwargs):
    """
    Construct the 108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=108,
        side_channels=8,
        groups=3,
        model_name="menet108_8x1_g3",
        **kwargs)
def menet128_8x1_g4(**kwargs):
    """
    Construct the 128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=128,
        side_channels=8,
        groups=4,
        model_name="menet128_8x1_g4",
        **kwargs)
def menet160_8x1_g8(**kwargs):
    """
    Construct the 160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=160,
        side_channels=8,
        groups=8,
        model_name="menet160_8x1_g8",
        **kwargs)
def menet228_12x1_g3(**kwargs):
    """
    Construct the 228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=228,
        side_channels=12,
        groups=3,
        model_name="menet228_12x1_g3",
        **kwargs)
def menet256_12x1_g4(**kwargs):
    """
    Construct the 256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=256,
        side_channels=12,
        groups=4,
        model_name="menet256_12x1_g4",
        **kwargs)
def menet348_12x1_g3(**kwargs):
    """
    Construct the 348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=348,
        side_channels=12,
        groups=3,
        model_name="menet348_12x1_g3",
        **kwargs)
def menet352_12x1_g8(**kwargs):
    """
    Construct the 352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=352,
        side_channels=12,
        groups=8,
        model_name="menet352_12x1_g8",
        **kwargs)
def menet456_24x1_g3(**kwargs):
    """
    Construct the 456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks
    for Mobile Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_menet(
        first_stage_channels=456,
        side_channels=24,
        groups=3,
        model_name="menet456_24x1_g3",
        **kwargs)
def _test():
    """
    Smoke-test the MENet variants: build each network, count trainable
    weights against known totals, and check the output logits shape.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        menet108_8x1_g3,
        menet128_8x1_g4,
        # menet160_8x1_g8,
        menet228_12x1_g3,
        menet256_12x1_g4,
        menet348_12x1_g3,
        menet352_12x1_g8,
        menet456_24x1_g3,
    ]

    # Known trainable-parameter totals per variant.
    expected_weight_counts = {
        menet108_8x1_g3: 654516,
        menet128_8x1_g4: 750796,
        menet160_8x1_g8: 850120,
        menet228_12x1_g3: 1806568,
        menet256_12x1_g4: 1888240,
        menet348_12x1_g3: 3368128,
        menet352_12x1_g8: 2272872,
        menet456_24x1_g3: 5304784,
    }

    for model in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model])

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 17,113 | 33.365462 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/voca.py | """
VOCA for speech-driven facial animation, implemented in Gluon.
Original paper: 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.
"""
__all__ = ['VOCA', 'voca8flame']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock
class VocaEncoder(HybridBlock):
    """
    VOCA encoder: a small convolutional branch over the audio window,
    conditioned on a one-hot speaker identity, followed by a two-layer
    fully-connected head.

    Parameters:
    ----------
    audio_features : int
        Number of audio features (characters/sounds).
    audio_window_size : int
        Size of audio window (for time related audio features).
    base_persons : int
        Number of base persons (subjects).
    encoder_features : int
        Number of encoder features.
    """
    def __init__(self,
                 audio_features,
                 audio_window_size,
                 base_persons,
                 encoder_features,
                 **kwargs):
        super(VocaEncoder, self).__init__(**kwargs)
        self.audio_window_size = audio_window_size
        channels = (32, 32, 64, 64)
        fc1_channels = 128
        with self.name_scope():
            self.bn = nn.BatchNorm(in_channels=1)
            # the one-hot identity is concatenated onto the input channels
            in_channels = audio_features + base_persons
            self.branch = nn.HybridSequential(prefix="")
            with self.branch.name_scope():
                # each (3,1)/(2,1) conv halves the time axis only
                for out_channels in channels:
                    self.branch.add(ConvBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        kernel_size=(3, 1),
                        strides=(2, 1),
                        padding=(1, 0),
                        use_bias=True,
                        use_bn=False))
                    in_channels = out_channels
            # the identity is concatenated again before the FC head
            in_channels += base_persons
            self.fc1 = nn.Dense(
                units=fc1_channels,
                in_units=in_channels)
            self.fc2 = nn.Dense(
                units=encoder_features,
                in_units=fc1_channels)
    def hybrid_forward(self, F, x, pid):
        """
        Parameters:
        ----------
        F : namespace
            Symbol or NDArray API.
        x : Tensor
            Audio window of shape (batch, 1, audio_window_size, audio_features).
        pid : Tensor
            One-hot person identity of shape (batch, base_persons).

        Returns:
        -------
        Tensor
            Encoded features of shape (batch, encoder_features).
        """
        x = self.bn(x)
        # (batch, 1, window, features) -> (batch, features, window, 1)
        x = x.swapaxes(1, 3)
        # broadcast the identity over the time axis: (batch, persons, window, 1)
        y = pid.expand_dims(-1).expand_dims(-1)
        y = y.tile(reps=(1, 1, self.audio_window_size, 1))
        x = F.concat(x, y, dim=1)
        x = self.branch(x)
        x = x.flatten()
        # condition the FC head on the identity as well
        x = F.concat(x, pid, dim=1)
        x = self.fc1(x)
        x = x.tanh()
        x = self.fc2(x)
        return x
class VOCA(HybridBlock):
    """
    VOCA model from 'Capture, Learning, and Synthesis of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.

    Maps a window of audio features plus an integer speaker id to the 3D
    vertex positions of a face mesh.

    Parameters:
    ----------
    audio_features : int, default 29
        Number of audio features (characters/sounds).
    audio_window_size : int, default 16
        Size of audio window (for time related audio features).
    base_persons : int, default 8
        Number of base persons (subjects).
    encoder_features : int, default 50
        Number of encoder features.
    vertices : int, default 5023
        Number of 3D geometry vertices.
    """
    def __init__(self,
                 audio_features=29,
                 audio_window_size=16,
                 base_persons=8,
                 encoder_features=50,
                 vertices=5023,
                 **kwargs):
        super(VOCA, self).__init__(**kwargs)
        self.base_persons = base_persons
        with self.name_scope():
            self.encoder = VocaEncoder(
                audio_features=audio_features,
                audio_window_size=audio_window_size,
                base_persons=base_persons,
                encoder_features=encoder_features)
            # linear decoder emits a flat xyz vector for all vertices
            self.decoder = nn.Dense(
                units=(3 * vertices),
                in_units=encoder_features)
    def hybrid_forward(self, F, x, pid):
        """
        Parameters:
        ----------
        F : namespace
            Symbol or NDArray API.
        x : Tensor
            Audio window of shape (batch, 1, audio_window_size, audio_features).
        pid : Tensor
            Integer person (subject) ids of shape (batch,).

        Returns:
        -------
        Tensor
            Mesh vertices of shape (batch, 1, vertices, 3).
        """
        pid = pid.one_hot(depth=self.base_persons)
        x = self.encoder(x, pid)
        x = self.decoder(x)
        # regroup the flat (3 * vertices) vector into xyz triples
        x = x.reshape((0, 1, -1, 3))
        return x
def get_voca(base_persons,
             vertices,
             model_name=None,
             pretrained=False,
             ctx=cpu(),
             root=os.path.join("~", ".mxnet", "models"),
             **kwargs):
    """
    Construct a VOCA model with the given configuration, optionally loading
    pretrained weights.

    Parameters:
    ----------
    base_persons : int
        Number of base persons (subjects).
    vertices : int
        Number of 3D geometry vertices.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    net = VOCA(base_persons=base_persons, vertices=vertices, **kwargs)

    if pretrained:
        # a usable model name is required to locate the weight file
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weight_file_path, ctx=ctx)

    return net
def voca8flame(**kwargs):
    """
    Construct the VOCA-8-FLAME model (8 base persons, FLAME mesh topology) from 'Capture, Learning, and Synthesis
    of 3D Speaking Styles,' https://arxiv.org/abs/1905.03079.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_voca(
        base_persons=8,
        vertices=5023,
        model_name="voca8flame",
        **kwargs)
def _test():
    """
    Smoke-test VOCA: build the network, count trainable weights against the
    known total, and check the output vertex-tensor shape.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        voca8flame,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # count only differentiable parameters with a known shape
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != voca8flame or weight_count == 809563)

        batch = 14
        audio_features = 29
        audio_window_size = 16
        vertices = 5023

        x = mx.nd.random.normal(shape=(batch, 1, audio_window_size, audio_features), ctx=ctx)
        pid = mx.nd.array(np.full(shape=(batch,), fill_value=3), ctx=ctx)
        y = net(x, pid)
        assert (y.shape == (batch, 1, vertices, 3))


if __name__ == "__main__":
    _test()
| 6,959 | 29.79646 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shakeshakeresnet_cifar.py | """
Shake-Shake-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.
"""
__all__ = ['CIFARShakeShakeResNet', 'shakeshakeresnet20_2x16d_cifar10', 'shakeshakeresnet20_2x16d_cifar100',
'shakeshakeresnet20_2x16d_svhn', 'shakeshakeresnet26_2x32d_cifar10', 'shakeshakeresnet26_2x32d_cifar100',
'shakeshakeresnet26_2x32d_svhn']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeShake(mx.autograd.Function):
    """
    Shake-Shake function.

    In training, the two branch outputs are mixed with a random per-sample
    coefficient alpha on the forward pass, and the incoming gradient is
    re-split with an independently drawn coefficient beta on the backward
    pass. In inference, the branches are simply averaged.
    """
    def forward(self, x1, x2):
        if mx.autograd.is_training():
            # alpha has shape (batch, 1, 1, 1) and broadcasts over C/H/W
            alpha = mx.nd.random.uniform_like(x1.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)))
            y = mx.nd.broadcast_mul(alpha, x1) + mx.nd.broadcast_mul(1 - alpha, x2)
        else:
            y = 0.5 * (x1 + x2)
        return y
    def backward(self, dy):
        # beta is drawn independently of the forward alpha, one per sample
        beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)))
        return mx.nd.broadcast_mul(beta, dy), mx.nd.broadcast_mul(1 - beta, dy)
class ShakeShakeShortcut(HybridBlock):
    """
    Shake-Shake-ResNet shortcut: two half-width 1x1 convolution paths (one of
    them over a one-pixel-shifted copy of the input) whose outputs are
    concatenated and batch-normalized.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(ShakeShakeShortcut, self).__init__(**kwargs)
        # each path contributes half of the output channels
        assert (out_channels % 2 == 0)
        mid_channels = out_channels // 2
        with self.name_scope():
            # 1x1 average pooling with stride performs the spatial subsampling
            self.pool = nn.AvgPool2D(
                pool_size=1,
                strides=strides)
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        """
        Apply both shortcut paths and fuse them by channel concatenation.
        """
        x1 = self.pool(x)
        x1 = self.conv1(x1)
        # second path: drop the last row/column and pad at the top/left, so
        # the strided pooling samples a shifted pixel grid
        x2 = F.slice(x, begin=(None, None, None, None), end=(None, None, -1, -1))
        x2 = F.pad(x2, mode="constant", pad_width=(0, 0, 0, 0, 1, 0, 1, 0), constant_value=0)
        x2 = self.pool(x2)
        x2 = self.conv2(x2)
        x = F.concat(x1, x2, dim=1)
        x = self.bn(x)
        return x
class ShakeShakeResUnit(HybridBlock):
    """
    Shake-Shake-ResNet unit with residual connection: two parallel residual
    branches randomly mixed by the ShakeShake function.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 **kwargs):
        super(ShakeShakeResUnit, self).__init__(**kwargs)
        # the identity needs resizing whenever channels or resolution change
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        branch_class = ResBottleneck if bottleneck else ResBlock
        with self.name_scope():
            self.branch1 = branch_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            self.branch2 = branch_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_branch = ShakeShakeShortcut(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        """
        Mix the two residual branches via ShakeShake and add the identity.
        """
        if self.resize_identity:
            identity = self.identity_branch(x)
        else:
            identity = x
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        # random convex combination in training, plain average in inference
        x = ShakeShake()(x1, x2) + identity
        x = self.activ(x)
        return x
class CIFARShakeShakeResNet(HybridBlock):
    """
    Shake-Shake-ResNet model for CIFAR from 'Shake-Shake regularization,' https://arxiv.org/abs/1705.07485.

    A 3x3 stem, three stages of Shake-Shake residual units, global average
    pooling, and a linear classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARShakeShakeResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # downsample at the first unit of stages 2 and 3
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ShakeShakeResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck))
                        in_channels = out_channels
                self.features.add(stage)
            # 8x8 average pooling acts as global pooling for 32x32 inputs
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """
        Compute class logits for a batch of images.
        """
        x = self.features(x)
        x = self.output(x)
        return x
def get_shakeshakeresnet_cifar(classes,
                               blocks,
                               bottleneck,
                               first_stage_channels=16,
                               model_name=None,
                               pretrained=False,
                               ctx=cpu(),
                               root=os.path.join("~", ".mxnet", "models"),
                               **kwargs):
    """
    Create Shake-Shake-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    first_stage_channels : int, default 16
        Number of output channels for the first stage.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    assert (classes in [10, 100])

    # Depth per stage from the total block count (3 stages; a bottleneck unit
    # consumes 3 convolution layers, a simple unit 2).
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    init_block_channels = 16

    # Stage widths double at each of the three stages. (Replaces an obscure
    # functools.reduce with a lambda that built the same geometric sequence.)
    channels_per_layers = [first_stage_channels * (2 ** i) for i in range(3)]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if bottleneck:
        # bottleneck units expose 4x the internal width
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARShakeShakeResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def shakeshakeresnet20_2x16d_cifar10(classes=10, **kwargs):
    """
    Construct the Shake-Shake-ResNet-20-2x16d model for CIFAR-10 from 'Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        first_stage_channels=16,
        model_name="shakeshakeresnet20_2x16d_cifar10",
        **kwargs)
def shakeshakeresnet20_2x16d_cifar100(classes=100, **kwargs):
    """
    Construct the Shake-Shake-ResNet-20-2x16d model for CIFAR-100 from 'Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        first_stage_channels=16,
        model_name="shakeshakeresnet20_2x16d_cifar100",
        **kwargs)
def shakeshakeresnet20_2x16d_svhn(classes=10, **kwargs):
    """
    Construct the Shake-Shake-ResNet-20-2x16d model for SVHN from 'Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        first_stage_channels=16,
        model_name="shakeshakeresnet20_2x16d_svhn",
        **kwargs)
def shakeshakeresnet26_2x32d_cifar10(classes=10, **kwargs):
    """
    Construct the Shake-Shake-ResNet-26-2x32d model for CIFAR-10 from 'Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=26,
        bottleneck=False,
        first_stage_channels=32,
        model_name="shakeshakeresnet26_2x32d_cifar10",
        **kwargs)
def shakeshakeresnet26_2x32d_cifar100(classes=100, **kwargs):
    """
    Construct the Shake-Shake-ResNet-26-2x32d model for CIFAR-100 from 'Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=26,
        bottleneck=False,
        first_stage_channels=32,
        model_name="shakeshakeresnet26_2x32d_cifar100",
        **kwargs)
def shakeshakeresnet26_2x32d_svhn(classes=10, **kwargs):
    """
    Construct the Shake-Shake-ResNet-26-2x32d model for SVHN from 'Shake-Shake regularization,'
    https://arxiv.org/abs/1705.07485.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    return get_shakeshakeresnet_cifar(
        classes=classes,
        blocks=26,
        bottleneck=False,
        first_stage_channels=32,
        model_name="shakeshakeresnet26_2x32d_svhn",
        **kwargs)
def _test():
    """
    Smoke-test the Shake-Shake-ResNet CIFAR/SVHN variants: build each
    network, count trainable weights against known totals, and check the
    output logits shape.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        (shakeshakeresnet20_2x16d_cifar10, 10),
        (shakeshakeresnet20_2x16d_cifar100, 100),
        (shakeshakeresnet20_2x16d_svhn, 10),
        (shakeshakeresnet26_2x32d_cifar10, 10),
        (shakeshakeresnet26_2x32d_cifar100, 100),
        (shakeshakeresnet26_2x32d_svhn, 10),
    ]

    # Known trainable-parameter totals per variant.
    expected_weight_counts = {
        shakeshakeresnet20_2x16d_cifar10: 541082,
        shakeshakeresnet20_2x16d_cifar100: 546932,
        shakeshakeresnet20_2x16d_svhn: 541082,
        shakeshakeresnet26_2x32d_cifar10: 2923162,
        shakeshakeresnet26_2x32d_cifar100: 2934772,
        shakeshakeresnet26_2x32d_svhn: 2923162,
    }

    for model, classes in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # count only differentiable parameters with a known shape
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model])

        x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (14, classes))


if __name__ == "__main__":
    _test()
| 16,328 | 35.612108 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/wrn_cifar.py | """
WRN for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['CIFARWRN', 'wrn16_10_cifar10', 'wrn16_10_cifar100', 'wrn16_10_svhn', 'wrn28_10_cifar10',
'wrn28_10_cifar100', 'wrn28_10_svhn', 'wrn40_8_cifar10', 'wrn40_8_cifar100', 'wrn40_8_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARWRN(HybridBlock):
    """
    WRN model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    A bare 3x3 stem (pre-activation network, so the stem has no BN/ReLU),
    three stages of pre-activation residual units, a final BN+ReLU, global
    average pooling, and a linear classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARWRN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # downsample at the first unit of stages 2 and 3
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(PreResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=False,
                            conv1_stride=False))
                        in_channels = out_channels
                self.features.add(stage)
            # final BN + ReLU (pre-activation networks defer these to the end)
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 8x8 average pooling acts as global pooling for 32x32 inputs
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """
        Compute class logits for a batch of images.
        """
        x = self.features(x)
        x = self.output(x)
        return x
def get_wrn_cifar(classes,
                  blocks,
                  width_factor,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Construct a WRN model for CIFAR with the given depth and width factor.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Desired module.
    """
    # Three stages of equal depth; each simple pre-activation unit holds two
    # convolution layers, plus the stem and the classifier.
    assert ((blocks - 4) % 6 == 0)
    depth_per_stage = (blocks - 4) // 6
    init_block_channels = 16

    # Base widths 16/32/64, scaled by the width factor.
    channels = [[base_width * width_factor] * depth_per_stage
                for base_width in (16, 32, 64)]

    net = CIFARWRN(
        channels=channels,
        init_block_channels=init_block_channels,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weight_file_path, ctx=ctx)

    return net
def wrn16_10_cifar10(classes=10, **kwargs):
    """
    Build the WRN-16-10 model for CIFAR-10 ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_cifar10",
        **kwargs)
def wrn16_10_cifar100(classes=100, **kwargs):
    """
    Build the WRN-16-10 model for CIFAR-100 ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_cifar100",
        **kwargs)
def wrn16_10_svhn(classes=10, **kwargs):
    """
    Build the WRN-16-10 model for SVHN ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=16,
        width_factor=10,
        model_name="wrn16_10_svhn",
        **kwargs)
def wrn28_10_cifar10(classes=10, **kwargs):
    """
    Build the WRN-28-10 model for CIFAR-10 ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_cifar10",
        **kwargs)
def wrn28_10_cifar100(classes=100, **kwargs):
    """
    Build the WRN-28-10 model for CIFAR-100 ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_cifar100",
        **kwargs)
def wrn28_10_svhn(classes=10, **kwargs):
    """
    Build the WRN-28-10 model for SVHN ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=28,
        width_factor=10,
        model_name="wrn28_10_svhn",
        **kwargs)
def wrn40_8_cifar10(classes=10, **kwargs):
    """
    Build the WRN-40-8 model for CIFAR-10 ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_cifar10",
        **kwargs)
def wrn40_8_cifar100(classes=100, **kwargs):
    """
    Build the WRN-40-8 model for CIFAR-100 ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_cifar100",
        **kwargs)
def wrn40_8_svhn(classes=10, **kwargs):
    """
    Build the WRN-40-8 model for SVHN ('Wide Residual Networks,' https://arxiv.org/abs/1605.07146).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_wrn_cifar(
        classes=classes,
        blocks=40,
        width_factor=8,
        model_name="wrn40_8_svhn",
        **kwargs)
def _test():
    """Smoke-test every WRN variant: count parameters and run one forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes, expected trainable parameter count)
    models = [
        (wrn16_10_cifar10, 10, 17116634),
        (wrn16_10_cifar100, 100, 17174324),
        (wrn16_10_svhn, 10, 17116634),
        (wrn28_10_cifar10, 10, 36479194),
        (wrn28_10_cifar100, 100, 36536884),
        (wrn28_10_svhn, 10, 36479194),
        (wrn40_8_cifar10, 10, 35748314),
        (wrn40_8_cifar100, 100, 35794484),
        (wrn40_8_svhn, 10, 35748314),
    ]

    for model, classes, expected_count in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Sum sizes of all trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 12,476 | 34.245763 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/inceptionresnetv2.py | """
InceptionResNetV2 for ImageNet-1K, implemented in Gluon.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV2', 'inceptionresnetv2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1_block, conv3x3_block
from .inceptionv3 import AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
from .inceptionresnetv1 import InceptionAUnit, InceptionBUnit, InceptionCUnit, ReductionAUnit, ReductionBUnit
class InceptBlock5b(HybridBlock):
    """
    InceptionResNetV2 type Mixed-5b block: four parallel branches whose outputs are
    concatenated along the channel axis (96 + 64 + 96 + 64 = 320 output channels).

    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptBlock5b, self).__init__(**kwargs)
        # Fixed input width: the stem always feeds this block 192 channels.
        in_channels = 192
        with self.name_scope():
            # HybridConcurrent runs the branches in parallel and concatenates on axis 1.
            self.branches = HybridConcurrent(axis=1, prefix="")
            # Branch 1: plain 1x1 convolution.
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=96,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 2: 1x1 bottleneck followed by a 5x5 convolution.
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(48, 64),
                kernel_size_list=(1, 5),
                strides_list=(1, 1),
                padding_list=(0, 2),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 3: 1x1 bottleneck followed by two 3x3 convolutions.
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(64, 96, 96),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 1),
                padding_list=(0, 1, 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 4: average pooling followed by a 1x1 convolution.
            self.branches.add(AvgPoolBranch(
                in_channels=in_channels,
                out_channels=64,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
    def hybrid_forward(self, F, x):
        # Channel-wise concatenation of the branch outputs happens inside `branches`.
        x = self.branches(x)
        return x
class InceptInitBlock(HybridBlock):
    """
    InceptionResNetV2 specific initial block (the "stem"): a stack of convolutions and
    max-poolings ending with the Mixed-5b inception block, producing 320 channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # First spatial reduction: stride-2 3x3 convolution, no padding.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=32,
                strides=2,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=32,
                out_channels=32,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=32,
                out_channels=64,
                strides=1,
                padding=1,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            # Second spatial reduction.
            self.pool1 = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0)
            # 1x1 bottleneck, then 3x3 expansion to 192 channels.
            self.conv4 = conv1x1_block(
                in_channels=64,
                out_channels=80,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv5 = conv3x3_block(
                in_channels=80,
                out_channels=192,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            # Third spatial reduction.
            self.pool2 = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0)
            # Mixed-5b inception block widens 192 -> 320 channels.
            self.block = InceptBlock5b(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool1(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool2(x)
        x = self.block(x)
        return x
class InceptionResNetV2(HybridBlock):
    """
    InceptionResNetV2 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 **kwargs):
        super(InceptionResNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Number of units in the A, B and C stages respectively.
        layers = [10, 21, 11]
        # Channel width entering each stage.
        in_channels_list = [320, 1088, 2080]
        normal_out_channels_list = [[32, 32, 32, 32, 48, 64], [192, 128, 160, 192], [192, 192, 224, 256]]
        reduction_out_channels_list = [[384, 256, 256, 384], [256, 384, 256, 288, 256, 288, 320]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(InceptInitBlock(
                in_channels=in_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = in_channels_list[0]
            for i, layers_per_stage in enumerate(layers):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j in range(layers_per_stage):
                        # The first unit of every stage after the first is a reduction unit.
                        if (j == 0) and (i != 0):
                            unit = reduction_units[i - 1]
                            out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                        else:
                            unit = normal_units[i]
                            out_channels_list_per_stage = normal_out_channels_list[i]
                        # The very last unit keeps the residual un-scaled and skips the activation.
                        if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                            unit_kwargs = {"scale": 1.0, "activate": False}
                        else:
                            unit_kwargs = {}
                        stage.add(unit(
                            in_channels=in_channels,
                            out_channels_list=out_channels_list_per_stage,
                            bn_epsilon=bn_epsilon,
                            bn_use_global_stats=bn_use_global_stats,
                            **unit_kwargs))
                        # After a reduction unit, the width jumps to the stage's nominal value.
                        if (j == 0) and (i != 0):
                            in_channels = in_channels_list[i]
                self.features.add(stage)
            # Final 1x1 projection 2080 -> 1536 before global average pooling.
            self.features.add(conv1x1_block(
                in_channels=2080,
                out_channels=1536,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # 8x8 pooling acts as global pooling for the default 299x299 input.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            if dropout_rate > 0.0:
                self.output.add(nn.Dropout(rate=dropout_rate))
            self.output.add(nn.Dense(
                units=classes,
                in_units=1536))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_inceptionresnetv2(model_name=None,
                          pretrained=False,
                          ctx=cpu(),
                          root=os.path.join("~", ".mxnet", "models"),
                          **kwargs):
    """
    Construct an InceptionResNetV2 network, optionally loading pretrained weights.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name used to locate the pretrained weight file.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    net = InceptionResNetV2(**kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_file, ctx=ctx)
    return net
def inceptionresnetv2(**kwargs):
    """
    Build the InceptionResNetV2 model ('Inception-v4, Inception-ResNet and the Impact of
    Residual Connections on Learning,' https://arxiv.org/abs/1602.07261).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_inceptionresnetv2(
        model_name="inceptionresnetv2",
        bn_epsilon=1e-3,
        **kwargs)
def _test():
    """Smoke-test InceptionResNetV2: count parameters and run one forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, expected trainable parameter count)
    models = [
        (inceptionresnetv2, 55843464),
    ]

    for model, expected_count in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        # Sum sizes of all trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        x = mx.nd.zeros((1, 3, 299, 299), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 11,625 | 34.772308 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ghostnet.py | """
GhostNet for ImageNet-1K, implemented in Gluon.
Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
"""
__all__ = ['GhostNet', 'ghostnet']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
dwsconv3x3_block, SEBlock
class GhostHSigmoid(HybridBlock):
    """
    Approximated sigmoid function, specific for GhostNet.
    """
    def __init__(self, **kwargs):
        super(GhostHSigmoid, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        """Clamp the input element-wise to the [0, 1] range."""
        lower_bound = 0.0
        upper_bound = 1.0
        return F.clip(x, lower_bound, upper_bound)
class GhostConvBlock(HybridBlock):
    """
    GhostNet specific convolution block.

    Half of the outputs come from an ordinary 1x1 convolution; the remaining
    "ghost" features are generated cheaply from them by a depthwise 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    activation : function or str or None, default default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(GhostConvBlock, self).__init__(**kwargs)
        primary_channels = math.ceil(0.5 * out_channels)
        ghost_channels = out_channels - primary_channels

        with self.name_scope():
            self.main_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=primary_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            self.cheap_conv = dwconv3x3_block(
                in_channels=primary_channels,
                out_channels=ghost_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)

    def hybrid_forward(self, F, x):
        primary = self.main_conv(x)
        ghost = self.cheap_conv(primary)
        return F.concat(primary, ghost, dim=1)
class GhostExpBlock(HybridBlock):
    """
    GhostNet expansion block for residual path in GhostNet unit.

    Structure: ghost expansion conv -> (optional strided depthwise conv) ->
    (optional SE block) -> ghost projection conv without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : float
        Expansion factor.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_kernel3,
                 exp_factor,
                 use_se,
                 bn_use_global_stats=False,
                 **kwargs):
        super(GhostExpBlock, self).__init__(**kwargs)
        # The depthwise convolution only exists when the block downsamples.
        self.use_dw_conv = (strides != 1)
        self.use_se = use_se
        # Expanded width of the inner representation.
        mid_channels = int(math.ceil(exp_factor * in_channels))
        with self.name_scope():
            self.exp_conv = GhostConvBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dw_conv:
                dw_conv_class = dwconv3x3_block if use_kernel3 else dwconv5x5_block
                self.dw_conv = dw_conv_class(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            if self.use_se:
                # Squeeze-and-excitation with GhostNet's clipped (hard) sigmoid.
                self.se = SEBlock(
                    channels=mid_channels,
                    reduction=4,
                    out_activation=GhostHSigmoid())
            # Projection back to out_channels, deliberately without activation.
            self.pw_conv = GhostConvBlock(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.exp_conv(x)
        if self.use_dw_conv:
            x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x)
        return x
class GhostUnit(HybridBlock):
    """
    GhostNet unit: a GhostExpBlock body plus a residual shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : float
        Expansion factor.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_kernel3,
                 exp_factor,
                 use_se,
                 bn_use_global_stats=False,
                 **kwargs):
        super(GhostUnit, self).__init__(**kwargs)
        # A projection on the shortcut is required whenever the output shape differs.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            self.body = GhostExpBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_kernel3=use_kernel3,
                exp_factor=exp_factor,
                use_se=use_se,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = dwsconv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    pw_activation=None)

    def hybrid_forward(self, F, x):
        shortcut = self.identity_conv(x) if self.resize_identity else x
        return self.body(x) + shortcut
class GhostClassifier(HybridBlock):
    """
    GhostNet classifier head: a 1x1 expansion conv block followed by a plain 1x1
    convolution that produces the class scores.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 **kwargs):
        super(GhostClassifier, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)

    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class GhostNet(HybridBlock):
    """
    GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 exp_factors,
                 use_se,
                 first_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(GhostNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stride-2 stem convolution.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of each stage (stage 1 only if first_stride).
                        strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                        use_kernel3 = kernels3[i][j] == 1
                        exp_factor = exp_factors[i][j]
                        use_se_flag = use_se[i][j] == 1
                        stage.add(GhostUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            use_kernel3=use_kernel3,
                            exp_factor=exp_factor,
                            use_se=use_se_flag,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = final_block_channels
            # 7x7 pooling acts as global pooling for the default 224x224 input.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(GhostClassifier(
                in_channels=in_channels,
                out_channels=classes,
                mid_channels=classifier_mid_channels))
            self.output.add(nn.Flatten())
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_ghostnet(width_scale=1.0,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", "models"),
                 **kwargs):
    """
    Construct a GhostNet model, optionally loading pretrained weights.

    Parameters:
    ----------
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name used to locate the pretrained weight file.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    # Base (width_scale == 1.0) architecture configuration.
    init_block_channels = 16
    channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]]
    kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
    exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]]
    use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]]
    final_block_channels = 960
    classifier_mid_channels = 1280
    first_stride = False

    if width_scale != 1.0:
        # Rescale all widths, keeping them divisible by 4.
        channels = [[round_channels(cij * width_scale, divisor=4) for cij in ci] for ci in channels]
        init_block_channels = round_channels(init_block_channels * width_scale, divisor=4)
        if width_scale > 1.0:
            final_block_channels = round_channels(final_block_channels * width_scale, divisor=4)

    net = GhostNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        use_se=use_se,
        first_stride=first_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_file, ctx=ctx)
    return net
def ghostnet(**kwargs):
    """
    Build the GhostNet model ('GhostNet: More Features from Cheap Operations,'
    https://arxiv.org/abs/1911.11907).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameters store.
    """
    return get_ghostnet(
        model_name="ghostnet",
        **kwargs)
def _test():
    """Smoke-test GhostNet: count parameters and run one forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, expected trainable parameter count)
    models = [
        (ghostnet, 5180840),
    ]

    for model, expected_count in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Sum sizes of all trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 15,156 | 33.060674 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/efficientnet.py | """
EfficientNet for ImageNet-1K, implemented in Gluon.
Original papers:
- 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946,
- 'Adversarial Examples Improve Image Recognition,' https://arxiv.org/abs/1911.09665.
"""
__all__ = ['EfficientNet', 'calc_tf_padding', 'EffiInvResUnit', 'EffiInitBlock', 'efficientnet_b0', 'efficientnet_b1',
'efficientnet_b2', 'efficientnet_b3', 'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6',
'efficientnet_b7', 'efficientnet_b8', 'efficientnet_b0b', 'efficientnet_b1b', 'efficientnet_b2b',
'efficientnet_b3b', 'efficientnet_b4b', 'efficientnet_b5b', 'efficientnet_b6b', 'efficientnet_b7b',
'efficientnet_b0c', 'efficientnet_b1c', 'efficientnet_b2c', 'efficientnet_b3c', 'efficientnet_b4c',
'efficientnet_b5c', 'efficientnet_b6c', 'efficientnet_b7c', 'efficientnet_b8c']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock
def calc_tf_padding(in_size,
                    kernel_size,
                    strides=1,
                    dilation=1):
    """
    Calculate TF-same like padding size.

    Parameters:
    ----------
    in_size : tuple of 2 int
        Spatial size of input image.
    kernel_size : int
        Convolution window size.
    strides : int, default 1
        Strides of the convolution.
    dilation : int, default 1
        Dilation value for convolution layer.

    Returns:
    -------
    tuple of 8 int
        Padding sizes in `pad_width` form for `F.pad` on an NCHW tensor: zeros for the
        batch and channel axes, then (top, bottom, left, right). When the total padding
        is odd, the extra pixel goes on the bottom/right side, matching TF 'SAME'.
    """
    height, width = in_size
    oh = math.ceil(height / strides)
    ow = math.ceil(width / strides)
    # Total padding needed so the output keeps ceil(size / strides) pixels per axis.
    pad_h = max((oh - 1) * strides + (kernel_size - 1) * dilation + 1 - height, 0)
    pad_w = max((ow - 1) * strides + (kernel_size - 1) * dilation + 1 - width, 0)
    return 0, 0, 0, 0, pad_h // 2, pad_h - pad_h // 2, pad_w // 2, pad_w - pad_w // 2
class EffiDwsConvUnit(HybridBlock):
    """
    EfficientNet specific depthwise separable convolution block/unit with BatchNorms and activations at each convolution
    layers.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    in_size : tuple of 2 int, default None
        Spatial size of input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_epsilon,
                 bn_use_global_stats,
                 activation,
                 tf_mode,
                 in_size=None,
                 **kwargs):
        super(EffiDwsConvUnit, self).__init__(**kwargs)
        self.tf_mode = tf_mode
        self.in_size = in_size
        # Residual shortcut only when the unit keeps both shape and channel count.
        self.residual = (in_channels == out_channels) and (strides == 1)
        with self.name_scope():
            # In TF mode, padding is applied explicitly in hybrid_forward instead.
            self.dw_conv = dwconv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                padding=(0 if tf_mode else 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            self.se = SEBlock(
                channels=in_channels,
                reduction=4,
                mid_activation=activation)
            # Pointwise projection without activation.
            self.pw_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        if self.tf_mode:
            # NOTE(review): the x.shape fallback needs a concrete NDArray, so this branch
            # presumably cannot be hybridized unless in_size is given — confirm.
            in_size = self.in_size if self.in_size is not None else x.shape[2:]
            # assert (in_size == x.shape[2:])
            x = F.pad(x, mode="constant", pad_width=calc_tf_padding(in_size=in_size, kernel_size=3), constant_value=0)
        x = self.dw_conv(x)
        x = self.se(x)
        x = self.pw_conv(x)
        if self.residual:
            x = x + identity
        return x
class EffiInvResUnit(HybridBlock):
    """
    EfficientNet inverted residual unit (MBConv): 1x1 expansion, depthwise conv,
    optional SE, 1x1 projection, with a residual shortcut when shapes allow.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_factor : int
        Factor for expansion of channels.
    se_factor : int
        SE reduction factor for each unit.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    in_size : tuple of 2 int, default None
        Spatial size of input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 exp_factor,
                 se_factor,
                 bn_epsilon,
                 bn_use_global_stats,
                 activation,
                 tf_mode,
                 in_size=None,
                 **kwargs):
        super(EffiInvResUnit, self).__init__(**kwargs)
        self.kernel_size = kernel_size
        self.strides = strides
        self.tf_mode = tf_mode
        self.in_size = in_size
        # Residual shortcut only when the unit keeps both shape and channel count.
        self.residual = (in_channels == out_channels) and (strides == 1)
        # se_factor == 0 disables squeeze-and-excitation.
        self.use_se = se_factor > 0
        mid_channels = in_channels * exp_factor
        # Only kernel sizes 3 and 5 are supported; any other value leaves this None
        # and fails when conv2 is constructed below.
        dwconv_block_fn = dwconv3x3_block if kernel_size == 3 else (dwconv5x5_block if kernel_size == 5 else None)
        with self.name_scope():
            # 1x1 expansion to mid_channels.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            # Depthwise convolution; in TF mode padding happens in hybrid_forward.
            self.conv2 = dwconv_block_fn(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                padding=(0 if tf_mode else (kernel_size // 2)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            if self.use_se:
                # SE reduction is relative to in_channels (mid / (exp * se) == in / se).
                self.se = SEBlock(
                    channels=mid_channels,
                    reduction=(exp_factor * se_factor),
                    mid_activation=activation)
            # 1x1 projection without activation.
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        x = self.conv1(x)
        if self.tf_mode:
            # NOTE(review): the x.shape fallback needs a concrete NDArray, so this branch
            # presumably cannot be hybridized unless in_size is given — confirm.
            in_size = self.in_size if self.in_size is not None else x.shape[2:]
            # assert (in_size == x.shape[2:])
            x = F.pad(x,
                      mode="constant",
                      pad_width=calc_tf_padding(in_size=in_size, kernel_size=self.kernel_size, strides=self.strides),
                      constant_value=0)
        x = self.conv2(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv3(x)
        if self.residual:
            x = x + identity
        return x
class EffiInitBlock(HybridBlock):
    """
    EfficientNet stem: a single stride-2 3x3 convolution block, optionally
    preceded by explicit TF-style 'SAME' padding.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Name of activation function.
    tf_mode : bool
        Whether to use TF-like mode.
    in_size : tuple of 2 int, default None
        Spatial size of input image.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 activation,
                 tf_mode,
                 in_size=None,
                 **kwargs):
        super(EffiInitBlock, self).__init__(**kwargs)
        self.in_size = in_size
        self.tf_mode = tf_mode
        with self.name_scope():
            # Zero padding in tf_mode: the forward pass pads explicitly.
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                padding=(0 if tf_mode else 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)

    def hybrid_forward(self, F, x):
        if self.tf_mode:
            # Use the precomputed input size when available, otherwise infer
            # the spatial shape at runtime.
            spatial = x.shape[2:] if self.in_size is None else self.in_size
            x = F.pad(
                x,
                mode="constant",
                pad_width=calc_tf_padding(in_size=spatial, kernel_size=3, strides=2),
                constant_value=0)
        return self.conv(x)
class EfficientNet(HybridBlock):
    """
    EfficientNet(-B0) model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode (explicit 'SAME' padding inside the units).
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 strides_per_stage,
                 expansion_factors,
                 dropout_rate=0.2,
                 tf_mode=False,
                 fixed_size=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(EfficientNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        # Swish is the activation used throughout EfficientNet.
        activation = "swish"
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: stride-2 3x3 convolution block.
            self.features.add(EffiInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation,
                tf_mode=tf_mode,
                in_size=in_size if fixed_size else None))
            in_channels = init_block_channels
            # Track the running spatial size: the stem halves it. When
            # fixed_size is set, each unit receives its exact input size so
            # TF-style padding can be computed without shape inference.
            in_size = (math.ceil(in_size[0] / 2), math.ceil(in_size[1] / 2))
            for i, channels_per_stage in enumerate(channels):
                kernel_sizes_per_stage = kernel_sizes[i]
                expansion_factors_per_stage = expansion_factors[i]
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        kernel_size = kernel_sizes_per_stage[j]
                        expansion_factor = expansion_factors_per_stage[j]
                        # Only the first unit of a stage may downsample.
                        strides = strides_per_stage[i] if (j == 0) else 1
                        if i == 0:
                            # First stage uses depthwise-separable units
                            # (no channel expansion).
                            stage.add(EffiDwsConvUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                bn_epsilon=bn_epsilon,
                                bn_use_global_stats=bn_use_global_stats,
                                activation=activation,
                                tf_mode=tf_mode,
                                in_size=in_size if fixed_size else None))
                        else:
                            # Remaining stages use inverted residual (MBConv)
                            # units with squeeze-and-excitation.
                            stage.add(EffiInvResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                kernel_size=kernel_size,
                                strides=strides,
                                exp_factor=expansion_factor,
                                se_factor=4,
                                bn_epsilon=bn_epsilon,
                                bn_use_global_stats=bn_use_global_stats,
                                activation=activation,
                                tf_mode=tf_mode,
                                in_size=in_size if fixed_size else None))
                        in_channels = out_channels
                        if strides > 1:
                            # Keep the tracked spatial size in sync with the
                            # downsampling performed by this unit.
                            in_size = (math.ceil(in_size[0] / 2), math.ceil(in_size[1] / 2))
                self.features.add(stage)
            # Final 1x1 expansion before global pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation))
            in_channels = final_block_channels
            self.features.add(nn.GlobalAvgPool2D())
            # Classifier head: flatten -> (optional dropout) -> dense.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            if dropout_rate > 0.0:
                self.output.add(nn.Dropout(rate=dropout_rate))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        # Feature extractor followed by the classification head.
        x = self.features(x)
        x = self.output(x)
        return x
def get_efficientnet(version,
                     in_size,
                     tf_mode=False,
                     bn_epsilon=1e-5,
                     model_name=None,
                     pretrained=False,
                     ctx=cpu(),
                     root=os.path.join("~", ".mxnet", "models"),
                     **kwargs):
    """
    Create EfficientNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of EfficientNet ('b0'...'b8').
    in_size : tuple of two ints
        Spatial size of the expected input image.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-version compound-scaling setup: expected input resolution,
    # depth multiplier, width multiplier and classifier dropout rate.
    version_params = {
        "b0": ((224, 224), 1.0, 1.0, 0.2),
        "b1": ((240, 240), 1.1, 1.0, 0.2),
        "b2": ((260, 260), 1.2, 1.1, 0.3),
        "b3": ((300, 300), 1.4, 1.2, 0.3),
        "b4": ((380, 380), 1.8, 1.4, 0.4),
        "b5": ((456, 456), 2.2, 1.6, 0.4),
        "b6": ((528, 528), 2.6, 1.8, 0.5),
        "b7": ((600, 600), 3.1, 2.0, 0.5),
        "b8": ((672, 672), 3.6, 2.2, 0.5),
    }
    if version not in version_params:
        raise ValueError("Unsupported EfficientNet version {}".format(version))
    expected_in_size, depth_factor, width_factor, dropout_rate = version_params[version]
    assert (in_size == expected_in_size)

    # Base (B0) architecture description, layer-wise.
    init_block_channels = 32
    layers = [1, 2, 2, 3, 3, 4, 1]
    downsample = [1, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 40, 80, 112, 192, 320]
    expansion_factors_per_layers = [1, 6, 6, 6, 6, 6, 6]
    kernel_sizes_per_layers = [3, 3, 5, 3, 5, 5, 3]
    strides_per_stage = [1, 2, 2, 2, 1, 2, 1]
    final_block_channels = 1280

    # Apply the compound scaling to depth and width.
    layers = [int(math.ceil(li * depth_factor)) for li in layers]
    channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]

    from functools import reduce

    def _stagewise(values):
        # Regroup per-layer values into per-stage lists: a '1' in `downsample`
        # opens a new stage, a '0' merges the layer block into the previous one.
        return reduce(
            lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],
            zip(values, layers, downsample), [])

    channels = _stagewise(channels_per_layers)
    kernel_sizes = _stagewise(kernel_sizes_per_layers)
    expansion_factors = _stagewise(expansion_factors_per_layers)
    strides_per_stage = [si[0] for si in _stagewise(strides_per_stage)]

    init_block_channels = round_channels(init_block_channels * width_factor)
    if width_factor > 1.0:
        # The final block width must scale exactly (no divisor rounding drift).
        assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
        final_block_channels = round_channels(final_block_channels * width_factor)

    net = EfficientNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernel_sizes=kernel_sizes,
        strides_per_stage=strides_per_stage,
        expansion_factors=expansion_factors,
        dropout_rate=dropout_rate,
        tf_mode=tf_mode,
        bn_epsilon=bn_epsilon,
        in_size=in_size,
        **kwargs)

    if pretrained:
        # `not model_name` already covers None and the empty string.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def efficientnet_b0(in_size=(224, 224), **kwargs):
    """
    Build the EfficientNet-B0 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b0", model_name="efficientnet_b0", in_size=in_size, **kwargs)
def efficientnet_b1(in_size=(240, 240), **kwargs):
    """
    Build the EfficientNet-B1 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b1", model_name="efficientnet_b1", in_size=in_size, **kwargs)
def efficientnet_b2(in_size=(260, 260), **kwargs):
    """
    Build the EfficientNet-B2 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b2", model_name="efficientnet_b2", in_size=in_size, **kwargs)
def efficientnet_b3(in_size=(300, 300), **kwargs):
    """
    Build the EfficientNet-B3 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b3", model_name="efficientnet_b3", in_size=in_size, **kwargs)
def efficientnet_b4(in_size=(380, 380), **kwargs):
    """
    Build the EfficientNet-B4 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b4", model_name="efficientnet_b4", in_size=in_size, **kwargs)
def efficientnet_b5(in_size=(456, 456), **kwargs):
    """
    Build the EfficientNet-B5 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b5", model_name="efficientnet_b5", in_size=in_size, **kwargs)
def efficientnet_b6(in_size=(528, 528), **kwargs):
    """
    Build the EfficientNet-B6 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b6", model_name="efficientnet_b6", in_size=in_size, **kwargs)
def efficientnet_b7(in_size=(600, 600), **kwargs):
    """
    Build the EfficientNet-B7 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b7", model_name="efficientnet_b7", in_size=in_size, **kwargs)
def efficientnet_b8(in_size=(672, 672), **kwargs):
    """
    Build the EfficientNet-B8 classifier ('EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks,' https://arxiv.org/abs/1905.11946).

    Parameters:
    ----------
    in_size : tuple of two ints, default (672, 672)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b8", model_name="efficientnet_b8", in_size=in_size, **kwargs)
def efficientnet_b0b(in_size=(224, 224), **kwargs):
    """
    Build the EfficientNet-B0-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b0", model_name="efficientnet_b0b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b1b(in_size=(240, 240), **kwargs):
    """
    Build the EfficientNet-B1-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b1", model_name="efficientnet_b1b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b2b(in_size=(260, 260), **kwargs):
    """
    Build the EfficientNet-B2-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b2", model_name="efficientnet_b2b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b3b(in_size=(300, 300), **kwargs):
    """
    Build the EfficientNet-B3-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b3", model_name="efficientnet_b3b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b4b(in_size=(380, 380), **kwargs):
    """
    Build the EfficientNet-B4-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b4", model_name="efficientnet_b4b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b5b(in_size=(456, 456), **kwargs):
    """
    Build the EfficientNet-B5-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b5", model_name="efficientnet_b5b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b6b(in_size=(528, 528), **kwargs):
    """
    Build the EfficientNet-B6-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b6", model_name="efficientnet_b6b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b7b(in_size=(600, 600), **kwargs):
    """
    Build the EfficientNet-B7-b classifier (TF-like implementation) from 'EfficientNet: Rethinking
    Model Scaling for Convolutional Neural Networks,' https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b7", model_name="efficientnet_b7b", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b0c(in_size=(224, 224), **kwargs):
    """
    Build the EfficientNet-B0-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b0", model_name="efficientnet_b0c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b1c(in_size=(240, 240), **kwargs):
    """
    Build the EfficientNet-B1-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b1", model_name="efficientnet_b1c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b2c(in_size=(260, 260), **kwargs):
    """
    Build the EfficientNet-B2-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (260, 260)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b2", model_name="efficientnet_b2c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b3c(in_size=(300, 300), **kwargs):
    """
    Build the EfficientNet-B3-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b3", model_name="efficientnet_b3c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b4c(in_size=(380, 380), **kwargs):
    """
    Build the EfficientNet-B4-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (380, 380)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b4", model_name="efficientnet_b4c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b5c(in_size=(456, 456), **kwargs):
    """
    Build the EfficientNet-B5-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (456, 456)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b5", model_name="efficientnet_b5c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b6c(in_size=(528, 528), **kwargs):
    """
    Build the EfficientNet-B6-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (528, 528)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b6", model_name="efficientnet_b6c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b7c(in_size=(600, 600), **kwargs):
    """
    Build the EfficientNet-B7-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (600, 600)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b7", model_name="efficientnet_b7c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def efficientnet_b8c(in_size=(672, 672), **kwargs):
    """
    Build the EfficientNet-B8-c classifier (TF-like implementation, trained with AdvProp) from
    'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    in_size : tuple of two ints, default (672, 672)
        Expected spatial size of the input image.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_efficientnet(version="b8", model_name="efficientnet_b8c", in_size=in_size,
                            tf_mode=True, bn_epsilon=1e-3, **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """
    Smoke-test every EfficientNet variant: build it, hybridize, check the
    trainable-parameter count (where a reference value exists) and verify
    the output shape on a random batch.
    """
    import mxnet as mx
    pretrained = False
    fixed_size = True
    models = [
        efficientnet_b0,
        efficientnet_b1,
        efficientnet_b2,
        efficientnet_b3,
        efficientnet_b4,
        efficientnet_b5,
        efficientnet_b6,
        efficientnet_b7,
        efficientnet_b8,
        efficientnet_b0b,
        efficientnet_b1b,
        efficientnet_b2b,
        efficientnet_b3b,
        efficientnet_b4b,
        efficientnet_b5b,
        efficientnet_b6b,
        efficientnet_b7b,
        efficientnet_b0c,
        efficientnet_b1c,
        efficientnet_b2c,
        efficientnet_b3c,
        efficientnet_b4c,
        efficientnet_b5c,
        efficientnet_b6c,
        efficientnet_b7c,
        efficientnet_b8c,
    ]
    # Reference parameter counts; variants absent from the table are only
    # shape-checked (matches the original per-model assertions).
    expected_counts = {
        efficientnet_b0: 5288548,
        efficientnet_b1: 7794184,
        efficientnet_b2: 9109994,
        efficientnet_b3: 12233232,
        efficientnet_b4: 19341616,
        efficientnet_b5: 30389784,
        efficientnet_b6: 43040704,
        efficientnet_b7: 66347960,
        efficientnet_b8: 87413142,
        efficientnet_b0b: 5288548,
        efficientnet_b1b: 7794184,
        efficientnet_b2b: 9109994,
        efficientnet_b3b: 12233232,
        efficientnet_b4b: 19341616,
        efficientnet_b5b: 30389784,
        efficientnet_b6b: 43040704,
        efficientnet_b7b: 66347960,
    }
    batch = 4
    classes = 1000
    ctx = mx.cpu()
    for model in models:
        net = model(pretrained=pretrained, fixed_size=fixed_size)
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        if model in expected_counts:
            assert (weight_count == expected_counts[model])
        x = mx.nd.random.normal(shape=(batch, 3, net.in_size[0], net.in_size[1]), ctx=ctx)
        y = net(x)
        assert (y.shape == (batch, classes))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 44,055 | 37.713533 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/channelnet.py | """
ChannelNet for ImageNet-1K, implemented in Gluon.
Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,'
https://arxiv.org/abs/1809.01330.
"""
__all__ = ['ChannelNet', 'channelnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ReLU6
def dwconv3x3(in_channels,
              out_channels,
              strides,
              use_bias=False):
    """
    Create a 3x3 depthwise convolution layer (grouped so each output channel
    has its own filter).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2D(
        in_channels=in_channels,
        channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        groups=out_channels,
        use_bias=use_bias)
class ChannetConv(HybridBlock):
    """
    ChannelNet convolution block: convolution, optional dropout, batch
    normalization and an optional ReLU6 activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for the convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the convolution uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate (zero disables the dropout layer entirely).
    activate : bool, default True
        Whether to apply the ReLU6 activation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 dropout_rate=0.0,
                 activate=True,
                 **kwargs):
        super(ChannetConv, self).__init__(**kwargs)
        self.activate = activate
        self.use_dropout = (dropout_rate > 0.0)
        with self.name_scope():
            self.conv = nn.Conv2D(
                in_channels=in_channels,
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
            self.bn = nn.BatchNorm(in_channels=out_channels)
            if self.activate:
                self.activ = ReLU6()

    def hybrid_forward(self, F, x):
        # Pipeline: conv -> (dropout) -> batch norm -> (ReLU6).
        y = self.conv(x)
        if self.use_dropout:
            y = self.dropout(y)
        y = self.bn(y)
        if self.activate:
            y = self.activ(y)
        return y
def channet_conv1x1(in_channels,
                    out_channels,
                    strides=1,
                    groups=1,
                    use_bias=False,
                    dropout_rate=0.0,
                    activate=True):
    """
    1x1 version of ChannelNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    """
    # Pointwise (1x1) convolution: kernel fixed at 1, no padding needed.
    block_kwargs = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 1,
        "strides": strides,
        "padding": 0,
        "groups": groups,
        "use_bias": use_bias,
        "dropout_rate": dropout_rate,
        "activate": activate,
    }
    return ChannetConv(**block_kwargs)
def channet_conv3x3(in_channels,
                    out_channels,
                    strides,
                    padding=1,
                    dilation=1,
                    groups=1,
                    use_bias=False,
                    dropout_rate=0.0,
                    activate=True):
    """
    3x3 version of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    """
    # Thin wrapper around ChannetConv with the kernel size fixed to 3.
    block_kwargs = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 3,
        "strides": strides,
        "padding": padding,
        "dilation": dilation,
        "groups": groups,
        "use_bias": use_bias,
        "dropout_rate": dropout_rate,
        "activate": activate,
    }
    return ChannetConv(**block_kwargs)
class ChannetDwsConvBlock(HybridBlock):
    """
    ChannelNet specific depthwise separable convolution block with BatchNorms and activations at last convolution
    layers.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    dropout_rate : float, default 0.0
        Dropout rate.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 groups=1,
                 dropout_rate=0.0,
                 **kwargs):
        super(ChannetDwsConvBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Depthwise 3x3 (plain conv, no BN/activation) followed by a
            # pointwise 1x1 ChannetConv carrying BN, ReLU6 and optional dropout.
            self.dw_conv = dwconv3x3(
                in_channels=in_channels,
                out_channels=in_channels,
                strides=strides)
            self.pw_conv = channet_conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                groups=groups,
                dropout_rate=dropout_rate)

    def hybrid_forward(self, F, x):
        x = self.dw_conv(x)
        x = self.pw_conv(x)
        return x
class SimpleGroupBlock(HybridBlock):
    """
    ChannelNet specific block with a sequence of depthwise separable group convolution layers.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 channels,
                 multi_blocks,
                 groups,
                 dropout_rate,
                 **kwargs):
        super(SimpleGroupBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            # Channel count is preserved through the whole chain (stride 1,
            # in_channels == out_channels for every DWS block).
            for i in range(multi_blocks):
                self.blocks.add(ChannetDwsConvBlock(
                    in_channels=channels,
                    out_channels=channels,
                    strides=1,
                    groups=groups,
                    dropout_rate=dropout_rate))

    def hybrid_forward(self, F, x):
        x = self.blocks(x)
        return x
class ChannelwiseConv2d(HybridBlock):
    """
    ChannelNet specific block with channel-wise convolution.

    Parameters:
    ----------
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 groups,
                 dropout_rate,
                 **kwargs):
        super(ChannelwiseConv2d, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate > 0.0)
        with self.name_scope():
            # 3D convolution sliding along the channel dimension; kernel,
            # stride and padding along that axis are derived from `groups`.
            self.conv = nn.Conv3D(
                channels=groups,
                kernel_size=(4 * groups, 1, 1),
                strides=(groups, 1, 1),
                padding=(2 * groups - 1, 0, 0),
                use_bias=False,
                in_channels=1)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)

    def hybrid_forward(self, F, x):
        # Insert a singleton dim so channels become the depth axis: NCHW -> N1CHW.
        x = x.expand_dims(axis=1)
        x = self.conv(x)
        if self.use_dropout:
            x = self.dropout(x)
        # Merge the conv-output and depth axes back into a single channel axis
        # (reshape codes: 0 = keep batch, -3 = merge two dims, -2 = copy rest).
        x = x.reshape((0, -3, -2))
        return x
class ConvGroupBlock(HybridBlock):
    """
    ChannelNet specific block with a combination of channel-wise convolution, depthwise separable group convolutions.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 channels,
                 multi_blocks,
                 groups,
                 dropout_rate,
                 **kwargs):
        super(ConvGroupBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Channel-wise conv first, then the stack of DWS group convolutions.
            self.conv = ChannelwiseConv2d(
                groups=groups,
                dropout_rate=dropout_rate)
            self.block = SimpleGroupBlock(
                channels=channels,
                multi_blocks=multi_blocks,
                groups=groups,
                dropout_rate=dropout_rate)

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.block(x)
        return x
class ChannetUnit(nn.HybridBlock):
    """
    ChannelNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : tuple/list of 2 int
        Number of output channels for each sub-block.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    block_names : tuple/list of 2 str
        Sub-block names.
    merge_type : str
        Type of sub-block output merging.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 strides,
                 multi_blocks,
                 groups,
                 dropout_rate,
                 block_names,
                 merge_type,
                 **kwargs):
        super(ChannetUnit, self).__init__(**kwargs)
        assert (len(block_names) == 2)
        assert (merge_type in ["seq", "add", "cat"])
        self.merge_type = merge_type
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)):
                # Only the first sub-block may downsample.
                strides_i = (strides if i == 0 else 1)
                if block_name == "channet_conv3x3":
                    self.blocks.add(channet_conv3x3(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides_i,
                        dropout_rate=dropout_rate,
                        activate=False))
                elif block_name == "channet_dws_conv_block":
                    self.blocks.add(ChannetDwsConvBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides_i,
                        dropout_rate=dropout_rate))
                elif block_name == "simple_group_block":
                    # Group blocks keep their channel count; `out_channels` is
                    # not passed but still feeds the running `in_channels`.
                    self.blocks.add(SimpleGroupBlock(
                        channels=in_channels,
                        multi_blocks=multi_blocks,
                        groups=groups,
                        dropout_rate=dropout_rate))
                elif block_name == "conv_group_block":
                    self.blocks.add(ConvGroupBlock(
                        channels=in_channels,
                        multi_blocks=multi_blocks,
                        groups=groups,
                        dropout_rate=dropout_rate))
                else:
                    raise NotImplementedError()
                in_channels = out_channels

    def hybrid_forward(self, F, x):
        # Run sub-blocks sequentially while also collecting every intermediate
        # output; iterating `_children` (instead of calling self.blocks) makes
        # the intermediate tensors accessible for merging.
        x_outs = []
        for block in self.blocks._children.values():
            x = block(x)
            x_outs.append(x)
        if self.merge_type == "add":
            for i in range(len(x_outs) - 1):
                x = x + x_outs[i]
        elif self.merge_type == "cat":
            x = F.concat(*x_outs, dim=1)
        # "seq" merge: just return the last sub-block's output.
        return x
class ChannelNet(HybridBlock):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    channels : list of list of list of int
        Number of output channels for each unit.
    block_names : list of list of list of str
        Names of blocks for each unit.
    merge_types : list of list of str
        Merge types for each unit.
    dropout_rate : float, default 0.0001
        Dropout rate.
    multi_blocks : int, default 2
        Block count architectural parameter.
    groups : int, default 2
        Group count architectural parameter.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 block_names,
                 merge_types,
                 dropout_rate=0.0001,
                 multi_blocks=2,
                 groups=2,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ChannelNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Every stage downsamples at its first unit.
                        strides = 2 if (j == 0) else 1
                        stage.add(ChannetUnit(
                            in_channels=in_channels,
                            out_channels_list=out_channels,
                            strides=strides,
                            multi_blocks=multi_blocks,
                            groups=groups,
                            dropout_rate=dropout_rate,
                            block_names=block_names[i][j],
                            merge_type=merge_types[i][j]))
                        # "cat" merging concatenates both sub-block outputs,
                        # so the channel count is the sum of the pair.
                        if merge_types[i][j] == "cat":
                            in_channels = sum(out_channels)
                        else:
                            in_channels = out_channels[-1]
                self.features.add(stage)
            # 7x7 pooling assumes the default 224x224 input size.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_channelnet(model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create ChannelNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage architecture tables; each inner pair describes the two
    # sub-blocks of a ChannetUnit.
    channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]]
    block_names = [[["channet_conv3x3", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]]]
    merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]]

    net = ChannelNet(
        channels=channels,
        block_names=block_names,
        merge_types=merge_types,
        **kwargs)

    if pretrained:
        # `not model_name` already covers both None and the empty string.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def channelnet(**kwargs):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Delegate to the generic factory under this model's registered name.
    return get_channelnet(model_name="channelnet", **kwargs)
def _test():
    """Smoke-test: build the model, count weights and run a forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model in [channelnet]:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only trainable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != channelnet or weight_count == 3875112)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 19,449 | 30.677524 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pnasnet.py | """
PNASNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.
"""
__all__ = ['PNASNet', 'pnasnet5large']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1
from .nasnet import nasnet_dual_path_sequential, nasnet_batch_norm, NasConv, NasDwsConv, NasPathBlock, NASNetInitBlock,\
process_with_padding
class PnasMaxPoolBlock(HybridBlock):
    """
    PNASNet specific Max pooling layer with extra padding.

    Parameters:
    ----------
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 strides=2,
                 extra_padding=False,
                 **kwargs):
        super(PnasMaxPoolBlock, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        with self.name_scope():
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=strides,
                padding=1)

    def hybrid_forward(self, F, x):
        if self.extra_padding:
            # TF-compatible mode: apply pooling through the shared
            # pad-then-crop helper from the NASNet module.
            x = process_with_padding(x, F, self.pool)
        else:
            x = self.pool(x)
        return x
def pnas_conv1x1(in_channels,
                 out_channels,
                 strides=1):
    """
    1x1 version of the PNASNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    """
    # Pointwise NasConv: kernel 1, no padding, no grouping.
    conv_kwargs = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 1,
        "strides": strides,
        "padding": 0,
        "groups": 1,
    }
    return NasConv(**conv_kwargs)
class DwsBranch(HybridBlock):
    """
    PNASNet specific block with depthwise separable convolution layers.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether to use squeeze reduction if False.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 extra_padding=False,
                 stem=False,
                 **kwargs):
        super(DwsBranch, self).__init__(**kwargs)
        # Extra (TF-style) padding is never used in stem units.
        assert (not stem) or (not extra_padding)
        # Stem branches widen immediately; otherwise width changes at conv2.
        mid_channels = out_channels if stem else in_channels
        padding = kernel_size // 2
        with self.name_scope():
            # Only the first DWS conv may downsample / use extra padding.
            self.conv1 = NasDwsConv(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                extra_padding=extra_padding)
            self.conv2 = NasDwsConv(
                in_channels=mid_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=1,
                padding=padding)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
def dws_branch_k3(in_channels,
                  out_channels,
                  strides=2,
                  extra_padding=False,
                  stem=False):
    """
    3x3 version of the PNASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether to use squeeze reduction if False.
    """
    # DwsBranch with the kernel size pinned to 3.
    branch_kwargs = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 3,
        "strides": strides,
        "extra_padding": extra_padding,
        "stem": stem,
    }
    return DwsBranch(**branch_kwargs)
def dws_branch_k5(in_channels,
                  out_channels,
                  strides=2,
                  extra_padding=False,
                  stem=False):
    """
    5x5 version of the PNASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether to use squeeze reduction if False.
    """
    # DwsBranch with the kernel size pinned to 5.
    branch_kwargs = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 5,
        "strides": strides,
        "extra_padding": extra_padding,
        "stem": stem,
    }
    return DwsBranch(**branch_kwargs)
def dws_branch_k7(in_channels,
                  out_channels,
                  strides=2,
                  extra_padding=False):
    """
    7x7 version of the PNASNet specific depthwise separable convolution branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    # DwsBranch with the kernel size pinned to 7; never used as a stem branch.
    branch_kwargs = {
        "in_channels": in_channels,
        "out_channels": out_channels,
        "kernel_size": 7,
        "strides": strides,
        "extra_padding": extra_padding,
        "stem": False,
    }
    return DwsBranch(**branch_kwargs)
class PnasMaxPathBlock(HybridBlock):
    """
    PNASNet specific `max path` auxiliary block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(PnasMaxPathBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Downsample (default stride-2 max pool), then project channels
            # with a 1x1 conv and normalize.
            self.maxpool = PnasMaxPoolBlock()
            self.conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels)
            self.bn = nasnet_batch_norm(channels=out_channels)

    def hybrid_forward(self, F, x):
        x = self.maxpool(x)
        x = self.conv(x)
        x = self.bn(x)
        return x
class PnasBaseUnit(HybridBlock):
    """
    PNASNet base unit.
    """
    def __init__(self,
                 **kwargs):
        super(PnasBaseUnit, self).__init__(**kwargs)

    def cell_forward(self, F, x, x_prev):
        """
        Combine the five comb*-branch pairs defined by a subclass and
        concatenate their outputs along the channel axis.
        """
        # Subclasses must have defined the comb0..comb4 branch attributes.
        assert (hasattr(self, 'comb0_left'))
        x_left = x_prev
        x_right = x
        x0 = self.comb0_left(x_left) + self.comb0_right(x_left)
        x1 = self.comb1_left(x_right) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_right) + self.comb2_right(x_right)
        x3 = self.comb3_left(x2) + self.comb3_right(x_right)
        # comb4_right may be None (see PnasUnit); then the raw input is reused.
        x4 = self.comb4_left(x_left) + (self.comb4_right(x_right) if self.comb4_right else x_right)
        x_out = F.concat(x0, x1, x2, x3, x4, dim=1)
        return x_out
class Stem1Unit(PnasBaseUnit):
    """
    PNASNet Stem1 unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(Stem1Unit, self).__init__(**kwargs)
        # Five branch pairs are concatenated, so each produces 1/5 of the output.
        mid_channels = out_channels // 5
        with self.name_scope():
            self.conv_1x1 = pnas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.comb0_left = dws_branch_k5(
                in_channels=in_channels,
                out_channels=mid_channels,
                stem=True)
            self.comb0_right = PnasMaxPathBlock(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.comb1_left = dws_branch_k7(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb1_right = PnasMaxPoolBlock()
            self.comb2_left = dws_branch_k5(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb2_right = dws_branch_k3(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb3_left = dws_branch_k3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=1)
            self.comb3_right = PnasMaxPoolBlock()
            self.comb4_left = dws_branch_k3(
                in_channels=in_channels,
                out_channels=mid_channels,
                stem=True)
            self.comb4_right = pnas_conv1x1(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=2)

    def hybrid_forward(self, F, x):
        # The stem has no separate previous input: the raw input doubles as x_prev.
        x_prev = x
        x = self.conv_1x1(x)
        x_out = self.cell_forward(F, x, x_prev)
        return x_out
class PnasUnit(PnasBaseUnit):
    """
    PNASNet ordinary unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    reduction : bool, default False
        Whether to use reduction.
    extra_padding : bool, default False
        Whether to use extra padding.
    match_prev_layer_dimensions : bool, default False
        Whether to match previous layer dimensions.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 reduction=False,
                 extra_padding=False,
                 match_prev_layer_dimensions=False,
                 **kwargs):
        super(PnasUnit, self).__init__(**kwargs)
        # Five branch pairs are concatenated, so each produces 1/5 of the output.
        mid_channels = out_channels // 5
        stride = 2 if reduction else 1
        with self.name_scope():
            if match_prev_layer_dimensions:
                # Previous input has a different spatial size: use the
                # factorized NasPathBlock to align it.
                self.conv_prev_1x1 = NasPathBlock(
                    in_channels=prev_in_channels,
                    out_channels=mid_channels)
            else:
                self.conv_prev_1x1 = pnas_conv1x1(
                    in_channels=prev_in_channels,
                    out_channels=mid_channels)
            self.conv_1x1 = pnas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.comb0_left = dws_branch_k5(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=stride,
                extra_padding=extra_padding)
            self.comb0_right = PnasMaxPoolBlock(
                strides=stride,
                extra_padding=extra_padding)
            self.comb1_left = dws_branch_k7(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=stride,
                extra_padding=extra_padding)
            self.comb1_right = PnasMaxPoolBlock(
                strides=stride,
                extra_padding=extra_padding)
            self.comb2_left = dws_branch_k5(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=stride,
                extra_padding=extra_padding)
            self.comb2_right = dws_branch_k3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=stride,
                extra_padding=extra_padding)
            self.comb3_left = dws_branch_k3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=1)
            self.comb3_right = PnasMaxPoolBlock(
                strides=stride,
                extra_padding=extra_padding)
            self.comb4_left = dws_branch_k3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=stride,
                extra_padding=extra_padding)
            if reduction:
                self.comb4_right = pnas_conv1x1(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=2)
            else:
                # None signals cell_forward to pass the input through unchanged.
                self.comb4_right = None

    def hybrid_forward(self, F, x, x_prev):
        x_prev = self.conv_prev_1x1(x_prev)
        x = self.conv_1x1(x)
        x_out = self.cell_forward(F, x, x_prev)
        return x_out
class PNASNet(HybridBlock):
    """
    PNASNet model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    stem1_blocks_channels : list of 2 int
        Number of output channels for the Stem1 unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (331, 331)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 stem1_blocks_channels,
                 in_channels=3,
                 in_size=(331, 331),
                 classes=1000,
                 **kwargs):
        super(PNASNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            # Dual-path container: each unit sees both the current and the
            # previous unit's output, except for ordinal (single-path) ends.
            self.features = nasnet_dual_path_sequential(
                return_two=False,
                first_ordinals=2,
                last_ordinals=2)
            self.features.add(NASNetInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            self.features.add(Stem1Unit(
                in_channels=in_channels,
                out_channels=stem1_blocks_channels))
            prev_in_channels = in_channels
            in_channels = stem1_blocks_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nasnet_dual_path_sequential(prefix="stage{}_".format(i + 1))
                for j, out_channels in enumerate(channels_per_stage):
                    # First unit of each stage reduces spatial size.
                    reduction = (j == 0)
                    extra_padding = (j == 0) and (i not in [0, 2])
                    match_prev_layer_dimensions = (j == 1) or ((j == 0) and (i == 0))
                    stage.add(PnasUnit(
                        in_channels=in_channels,
                        prev_in_channels=prev_in_channels,
                        out_channels=out_channels,
                        reduction=reduction,
                        extra_padding=extra_padding,
                        match_prev_layer_dimensions=match_prev_layer_dimensions))
                    prev_in_channels = in_channels
                    in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.Activation("relu"))
            # 11x11 pooling assumes the default 331x331 input size.
            self.features.add(nn.AvgPool2D(
                pool_size=11,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dropout(rate=0.5))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_pnasnet(model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create PNASNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    repeat = 4
    init_block_channels = 96
    stem_blocks_channels = [270, 540]
    norm_channels = [1080, 2160, 4320]
    channels = [[ci] * repeat for ci in norm_channels]
    stem1_blocks_channels = stem_blocks_channels[0]
    # The second stem unit is folded in as the first unit of stage 1.
    channels[0] = [stem_blocks_channels[1]] + channels[0]

    net = PNASNet(
        channels=channels,
        init_block_channels=init_block_channels,
        stem1_blocks_channels=stem1_blocks_channels,
        **kwargs)

    if pretrained:
        # `not model_name` already covers both None and the empty string.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def pnasnet5large(**kwargs):
    """
    PNASNet-5-Large model from 'Progressive Neural Architecture Search,' https://arxiv.org/abs/1712.00559.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Delegate to the generic factory under this model's registered name.
    return get_pnasnet(model_name="pnasnet5large", **kwargs)
def _test():
    """Smoke-test: build the model, count weights and run a forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model in [pnasnet5large]:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only trainable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != pnasnet5large or weight_count == 86057668)

        x = mx.nd.zeros((1, 3, 331, 331), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 18,996 | 30.091653 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/efficientnetedge.py | """
EfficientNet-Edge for ImageNet-1K, implemented in Gluon.
Original paper: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
https://arxiv.org/abs/1905.11946.
"""
__all__ = ['EfficientNetEdge', 'efficientnet_edge_small_b', 'efficientnet_edge_medium_b', 'efficientnet_edge_large_b']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1_block, conv3x3_block, SEBlock
from .efficientnet import EffiInvResUnit, EffiInitBlock
class EffiEdgeResUnit(HybridBlock):
    """
    EfficientNet-Edge edge residual unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_factor : int
        Factor for expansion of channels.
    se_factor : int
        SE reduction factor for each unit.
    mid_from_in : bool
        Whether to use input channel count for middle channel count calculation.
    use_skip : bool
        Whether to use skip connection.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 exp_factor,
                 se_factor,
                 mid_from_in,
                 use_skip,
                 bn_epsilon,
                 bn_use_global_stats,
                 activation,
                 **kwargs):
        super(EffiEdgeResUnit, self).__init__(**kwargs)
        # Identity skip requires matching shapes in addition to use_skip.
        self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
        self.use_se = se_factor > 0
        mid_channels = in_channels * exp_factor if mid_from_in else out_channels * exp_factor
        with self.name_scope():
            # Edge variant: expansion uses a full 3x3 conv (not depthwise),
            # and projection is the 1x1 conv without activation.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            if self.use_se:
                self.se = SEBlock(
                    channels=mid_channels,
                    reduction=(exp_factor * se_factor),
                    mid_activation=activation)
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=strides,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)

    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        x = self.conv1(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x)
        if self.residual:
            x = x + identity
        return x
class EfficientNetEdge(HybridBlock):
    """
    EfficientNet-Edge model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernel_sizes : list of list of int
        Number of kernel sizes for each unit.
    strides_per_stage : list int
        Stride value for the first unit of each stage.
    expansion_factors : list of list of int
        Number of expansion factors for each unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernel_sizes,
                 strides_per_stage,
                 expansion_factors,
                 dropout_rate=0.2,
                 tf_mode=False,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(EfficientNetEdge, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Edge variant replaces swish with plain ReLU throughout.
        activation = "relu"
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(EffiInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation,
                tf_mode=tf_mode))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                kernel_sizes_per_stage = kernel_sizes[i]
                expansion_factors_per_stage = expansion_factors[i]
                # Stage 0 units take mid-channels from out_channels and skip
                # the residual connection; later stages use the input count.
                mid_from_in = (i != 0)
                use_skip = (i != 0)
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        kernel_size = kernel_sizes_per_stage[j]
                        expansion_factor = expansion_factors_per_stage[j]
                        strides = strides_per_stage[i] if (j == 0) else 1
                        # First three stages use edge residual units; the rest
                        # use standard inverted residual units. SE is disabled
                        # (se_factor=0) in both cases.
                        if i < 3:
                            stage.add(EffiEdgeResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                exp_factor=expansion_factor,
                                se_factor=0,
                                mid_from_in=mid_from_in,
                                use_skip=use_skip,
                                bn_epsilon=bn_epsilon,
                                bn_use_global_stats=bn_use_global_stats,
                                activation=activation))
                        else:
                            stage.add(EffiInvResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                kernel_size=kernel_size,
                                strides=strides,
                                exp_factor=expansion_factor,
                                se_factor=0,
                                bn_epsilon=bn_epsilon,
                                bn_use_global_stats=bn_use_global_stats,
                                activation=activation,
                                tf_mode=tf_mode))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation))
            in_channels = final_block_channels
            self.features.add(nn.GlobalAvgPool2D())
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            if dropout_rate > 0.0:
                self.output.add(nn.Dropout(rate=dropout_rate))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_efficientnet_edge(version,
                          in_size,
                          tf_mode=False,
                          bn_epsilon=1e-5,
                          model_name=None,
                          pretrained=False,
                          ctx=cpu(),
                          root=os.path.join("~", ".mxnet", "models"),
                          **kwargs):
    """
    Create EfficientNet-Edge model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of EfficientNet ('small', 'medium', 'large').
    in_size : tuple of two ints
        Spatial size of the expected input image.
    tf_mode : bool, default False
        Whether to use TF-like mode.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        The constructed (and optionally pretrained) network.
    """
    dropout_rate = 0.0
    if version == "small":
        assert (in_size == (224, 224))
        depth_factor = 1.0
        width_factor = 1.0
        # dropout_rate = 0.2  # reference value from the paper, disabled here
    elif version == "medium":
        assert (in_size == (240, 240))
        depth_factor = 1.1
        width_factor = 1.0
        # dropout_rate = 0.2  # reference value from the paper, disabled here
    elif version == "large":
        assert (in_size == (300, 300))
        depth_factor = 1.4
        width_factor = 1.2
        # dropout_rate = 0.3  # reference value from the paper, disabled here
    else:
        raise ValueError("Unsupported EfficientNet-Edge version {}".format(version))
    init_block_channels = 32
    layers = [1, 2, 4, 5, 4, 2]
    downsample = [1, 1, 1, 1, 0, 1]
    channels_per_layers = [24, 32, 48, 96, 144, 192]
    expansion_factors_per_layers = [4, 8, 8, 8, 8, 8]
    kernel_sizes_per_layers = [3, 3, 3, 5, 5, 5]
    strides_per_stage = [1, 2, 2, 2, 1, 2]
    final_block_channels = 1280
    # Depth scaling: round the per-group unit count up.
    layers = [int(math.ceil(li * depth_factor)) for li in layers]
    # Width scaling: round channel counts to the divisibility constraint.
    channels_per_layers = [round_channels(ci * width_factor) for ci in channels_per_layers]

    def group_by_stage(values):
        # Expand each per-layer value into `li` copies. A non-zero entry in
        # `downsample` opens a new stage; a zero merges the units into the
        # previous stage. This single helper replaces four byte-identical
        # functools.reduce lambdas from the original implementation.
        stages = []
        for value, li, ds in zip(values, layers, downsample):
            if ds != 0:
                stages.append([value] * li)
            else:
                stages[-1] = stages[-1] + [value] * li
        return stages

    channels = group_by_stage(channels_per_layers)
    kernel_sizes = group_by_stage(kernel_sizes_per_layers)
    expansion_factors = group_by_stage(expansion_factors_per_layers)
    # Strides are constant within a stage: keep only the first entry per stage.
    strides_per_stage = [si[0] for si in group_by_stage(strides_per_stage)]
    init_block_channels = round_channels(init_block_channels * width_factor)
    if width_factor > 1.0:
        # Guard against a rounding mismatch on the final block width.
        assert (int(final_block_channels * width_factor) == round_channels(final_block_channels * width_factor))
        final_block_channels = round_channels(final_block_channels * width_factor)
    net = EfficientNetEdge(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernel_sizes=kernel_sizes,
        strides_per_stage=strides_per_stage,
        expansion_factors=expansion_factors,
        dropout_rate=dropout_rate,
        tf_mode=tf_mode,
        bn_epsilon=bn_epsilon,
        in_size=in_size,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def efficientnet_edge_small_b(in_size=(224, 224), **kwargs):
    """
    EfficientNet-Edge-Small-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="small", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
                  model_name="efficientnet_edge_small_b")
    return get_efficientnet_edge(**config, **kwargs)
def efficientnet_edge_medium_b(in_size=(240, 240), **kwargs):
    """
    EfficientNet-Edge-Medium-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    in_size : tuple of two ints, default (240, 240)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="medium", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
                  model_name="efficientnet_edge_medium_b")
    return get_efficientnet_edge(**config, **kwargs)
def efficientnet_edge_large_b(in_size=(300, 300), **kwargs):
    """
    EfficientNet-Edge-Large-b model from 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks,'
    https://arxiv.org/abs/1905.11946.
    Parameters:
    ----------
    in_size : tuple of two ints, default (300, 300)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="large", in_size=in_size, tf_mode=True, bn_epsilon=1e-3,
                  model_name="efficientnet_edge_large_b")
    return get_efficientnet_edge(**config, **kwargs)
def _test():
    """
    Smoke test: build each EfficientNet-Edge variant, verify its trainable
    parameter count, and run a single forward pass.
    """
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable parameter count per model constructor.
    expected_counts = {
        efficientnet_edge_small_b: 5438392,
        efficientnet_edge_medium_b: 6899496,
        efficientnet_edge_large_b: 10589712,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only differentiable parameters with known shapes.
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, net.in_size[0], net.in_size[1]), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 16,355 | 37.850356 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibnresnext.py | """
IBN-ResNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['IBNResNeXt', 'ibn_resnext50_32x4d', 'ibn_resnext101_32x4d', 'ibn_resnext101_64x4d']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
from .ibnresnet import ibn_conv1x1_block
class IBNResNeXtBottleneck(HybridBlock):
    """
    IBN-ResNeXt bottleneck block for residual path in IBN-ResNeXt unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 conv1_ibn,
                 bn_use_global_stats,
                 **kwargs):
        super(IBNResNeXtBottleneck, self).__init__(**kwargs)
        # Standard bottleneck reduction: internal width is 1/4 of the output.
        mid_channels = out_channels // 4
        # Per-group width D scales with bottleneck_width relative to the
        # baseline of 64; total grouped width is cardinality * D (ResNeXt).
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        with self.name_scope():
            # 1x1 reduce; optionally uses IBN instead of plain BatchNorm.
            self.conv1 = ibn_conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                use_ibn=conv1_ibn,
                bn_use_global_stats=bn_use_global_stats)
            # Grouped 3x3 convolution carries the unit's stride.
            self.conv2 = conv3x3_block(
                in_channels=group_width,
                out_channels=group_width,
                strides=strides,
                groups=cardinality,
                bn_use_global_stats=bn_use_global_stats)
            # 1x1 expand without activation; the residual sum is activated
            # by the enclosing unit.
            self.conv3 = conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        # 1x1 reduce -> grouped 3x3 -> 1x1 expand (no final activation here).
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class IBNResNeXtUnit(HybridBlock):
    """
    IBN-ResNeXt unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 conv1_ibn,
                 bn_use_global_stats,
                 **kwargs):
        super(IBNResNeXtUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the residual branch changes
        # channel count or spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = IBNResNeXtBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                conv1_ibn=conv1_ibn,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                # 1x1 projection (no activation) to match the residual branch.
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # Residual connection: y = relu(body(x) + identity(x)).
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        return x
class IBNResNeXt(HybridBlock):
    """
    IBN-ResNeXt model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(IBNResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage but the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # IBN is applied everywhere except the deepest
                        # (2048-channel) stage.
                        conv1_ibn = (out_channels < 2048)
                        stage.add(IBNResNeXtUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            cardinality=cardinality,
                            bottleneck_width=bottleneck_width,
                            conv1_ibn=conv1_ibn,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pooling assumes a 224x224 input -> 7x7 feature map.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_ibnresnext(blocks,
                   cardinality,
                   bottleneck_width,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create IBN-ResNeXt model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Units per stage for each supported depth.
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported IBN-ResNeXt with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = []
    for ci, li in zip(channels_per_layers, layers):
        channels.append([ci] * li)
    net = IBNResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def ibn_resnext50_32x4d(**kwargs):
    """
    IBN-ResNeXt-50 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(blocks=50, cardinality=32, bottleneck_width=4, model_name="ibn_resnext50_32x4d")
    return get_ibnresnext(**config, **kwargs)
def ibn_resnext101_32x4d(**kwargs):
    """
    IBN-ResNeXt-101 (32x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(blocks=101, cardinality=32, bottleneck_width=4, model_name="ibn_resnext101_32x4d")
    return get_ibnresnext(**config, **kwargs)
def ibn_resnext101_64x4d(**kwargs):
    """
    IBN-ResNeXt-101 (64x4d) model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(blocks=101, cardinality=64, bottleneck_width=4, model_name="ibn_resnext101_64x4d")
    return get_ibnresnext(**config, **kwargs)
def _test():
    """
    Smoke test: build each IBN-ResNeXt variant, verify its trainable
    parameter count, and run a single forward pass.
    """
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable parameter count per model constructor.
    expected_counts = {
        ibn_resnext50_32x4d: 25028904,
        ibn_resnext101_32x4d: 44177704,
        ibn_resnext101_64x4d: 83455272,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with known shapes.
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 12,457 | 32.853261 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/squeezenext.py | """
SqueezeNext for ImageNet-1K, implemented in Gluon.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock, conv1x1_block, conv7x7_block
class SqnxtUnit(HybridBlock):
    """
    SqueezeNext unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 **kwargs):
        super(SqnxtUnit, self).__init__(**kwargs)
        # `reduction_den` controls how strongly the leading 1x1 convolutions
        # squeeze the channel count; a projection shortcut is used whenever
        # the unit changes shape.
        if strides == 2:
            reduction_den = 1
            self.resize_identity = True
        elif in_channels > out_channels:
            reduction_den = 4
            self.resize_identity = True
        else:
            reduction_den = 2
            self.resize_identity = False
        with self.name_scope():
            # Two-stage 1x1 bottleneck squeeze; the first conv carries strides.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=(in_channels // reduction_den),
                strides=strides,
                use_bias=True)
            self.conv2 = conv1x1_block(
                in_channels=(in_channels // reduction_den),
                out_channels=(in_channels // (2 * reduction_den)),
                use_bias=True)
            # Separable 3x3 convolution factored into 1x3 followed by 3x1.
            self.conv3 = ConvBlock(
                in_channels=(in_channels // (2 * reduction_den)),
                out_channels=(in_channels // reduction_den),
                kernel_size=(1, 3),
                strides=1,
                padding=(0, 1),
                use_bias=True)
            self.conv4 = ConvBlock(
                in_channels=(in_channels // reduction_den),
                out_channels=(in_channels // reduction_den),
                kernel_size=(3, 1),
                strides=1,
                padding=(1, 0),
                use_bias=True)
            # 1x1 expansion back to the unit's output width.
            self.conv5 = conv1x1_block(
                in_channels=(in_channels // reduction_den),
                out_channels=out_channels,
                use_bias=True)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_bias=True)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # Residual connection: y = relu(conv-chain(x) + identity(x)).
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = x + identity
        x = self.activ(x)
        return x
class SqnxtInitBlock(HybridBlock):
    """
    SqueezeNext specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(SqnxtInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # 7x7 stride-2 convolution with reduced padding (1 rather than
            # the shape-preserving 3), followed by 3x3 stride-2 max pooling.
            self.conv = conv7x7_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                padding=1,
                use_bias=True)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                ceil_mode=True)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.pool(x)
        return x
class SqueezeNext(HybridBlock):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SqueezeNext, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(SqnxtInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage but the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SqnxtUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 1x1 convolution widens features before pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                use_bias=True))
            in_channels = final_block_channels
            # 7x7 average pooling assumes a 224x224 input -> 7x7 feature map.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]
    # Units per stage for each supported architecture version.
    layers_by_version = {
        "23": [6, 6, 8, 1],
        "23v5": [2, 4, 14, 1],
    }
    if version not in layers_by_version:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))
    layers = layers_by_version[version]
    channels = []
    for ci, li in zip(channels_per_layers, layers):
        channels.append([ci] * li)
    if width_scale != 1:
        # Scale every channel count uniformly, truncating to integers.
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)
    net = SqueezeNext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="23", width_scale=1.0, model_name="sqnxt23_w1")
    return get_squeezenext(**config, **kwargs)
def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="23", width_scale=1.5, model_name="sqnxt23_w3d2")
    return get_squeezenext(**config, **kwargs)
def sqnxt23_w2(**kwargs):
    """
    2.0-SqNxt-23 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="23", width_scale=2.0, model_name="sqnxt23_w2")
    return get_squeezenext(**config, **kwargs)
def sqnxt23v5_w1(**kwargs):
    """
    1.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1")
    return get_squeezenext(**config, **kwargs)
def sqnxt23v5_w3d2(**kwargs):
    """
    1.5-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2")
    return get_squeezenext(**config, **kwargs)
def sqnxt23v5_w2(**kwargs):
    """
    2.0-SqNxt-23v5 model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed configuration of this named variant; user kwargs are forwarded.
    config = dict(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2")
    return get_squeezenext(**config, **kwargs)
def _test():
    """
    Smoke test: build each SqueezeNext variant, verify its trainable
    parameter count, and run a single forward pass.
    """
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable parameter count per model constructor.
    expected_counts = {
        sqnxt23_w1: 724056,
        sqnxt23_w3d2: 1511824,
        sqnxt23_w2: 2583752,
        sqnxt23v5_w1: 921816,
        sqnxt23v5_w3d2: 1953616,
        sqnxt23v5_w2: 3366344,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with known shapes.
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 13,315 | 32.124378 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/xdensenet.py | """
X-DenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block',
'XDenseUnit']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .preresnet import PreResInitBlock, PreResActivation
from .densenet import TransitionBlock
@mx.init.register
class XMaskInit(mx.init.Initializer):
    """
    Returns an initializer performing "X-Net" initialization for masks.
    Parameters:
    ----------
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 expand_ratio,
                 **kwargs):
        super(XMaskInit, self).__init__(**kwargs)
        assert (expand_ratio > 0)
        self.expand_ratio = expand_ratio
    def _init_weight(self, _, arr):
        # For every output filter, set a random subset of the (grouped) input
        # channels to one; the rest of the mask stays zero. At least one
        # channel is always connected (the max(..., 1) floor).
        shape = arr.shape
        expand_size = max(shape[1] // self.expand_ratio, 1)
        shape1_arange = mx.nd.arange(shape[1], ctx=arr.context)
        arr[:] = 0
        for i in range(shape[0]):
            # Random channel subset for output filter i.
            jj = mx.nd.random.shuffle(shape1_arange)[:expand_size]
            arr[i, jj, :, :] = 1
class XConv2D(nn.Conv2D):
    """
    X-Convolution layer.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    groups : int, default 1
        Number of groups.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 groups=1,
                 expand_ratio=2,
                 **kwargs):
        super(XConv2D, self).__init__(
            in_channels=in_channels,
            channels=out_channels,
            kernel_size=kernel_size,
            groups=groups,
            **kwargs)
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        grouped_in_channels = in_channels // groups
        # Fixed binary mask (no gradient, not differentiable) with the same
        # shape as the weight tensor; XMaskInit fills it with a sparse
        # expander connectivity pattern.
        self.mask = self.params.get(
            name="mask",
            grad_req="null",
            shape=(out_channels, grouped_in_channels, kernel_size[0], kernel_size[1]),
            init=XMaskInit(expand_ratio=expand_ratio),
            differentiable=False)
    def hybrid_forward(self, F, x, weight, bias=None, mask=None):
        # Zero out masked weights, then delegate to the regular convolution.
        masked_weight = weight * mask
        return super(XConv2D, self).hybrid_forward(F, x, weight=masked_weight, bias=bias)
class PreXConvBlock(HybridBlock):
    """
    X-Convolution block with Batch normalization and ReLU pre-activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 bn_use_global_stats=False,
                 return_preact=False,
                 activate=True,
                 expand_ratio=2,
                 **kwargs):
        super(PreXConvBlock, self).__init__(**kwargs)
        self.return_preact = return_preact
        self.activate = activate
        with self.name_scope():
            # Pre-activation ordering: BN -> (optional) ReLU -> masked conv.
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = nn.Activation("relu")
            self.conv = XConv2D(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                expand_ratio=expand_ratio)
    def hybrid_forward(self, F, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Expose the pre-activated tensor to the caller as well.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def pre_xconv1x1_block(in_channels,
                       out_channels,
                       strides=1,
                       use_bias=False,
                       bn_use_global_stats=False,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    1x1 version of the pre-activated x-convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    # A 1x1 kernel needs no padding to preserve the spatial size.
    config = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        use_bias=use_bias,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)
    return PreXConvBlock(**config)
def pre_xconv3x3_block(in_channels,
                       out_channels,
                       strides=1,
                       padding=1,
                       dilation=1,
                       groups=1,
                       bn_use_global_stats=False,
                       return_preact=False,
                       activate=True,
                       expand_ratio=2):
    """
    Construct a 3x3 pre-activated x-convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    expand_ratio : int, default 2
        Ratio of expansion.
    """
    # 3x3 kernel; bias stays at the PreXConvBlock default (disabled).
    block_params = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        activate=activate,
        expand_ratio=expand_ratio)
    return PreXConvBlock(**block_params)
class XDenseUnit(HybridBlock):
    """
    X-DenseNet unit: a pre-activated 1x1 bottleneck followed by a 3x3
    x-convolution, whose output is concatenated to the unit input
    (dense connectivity).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int
        Ratio of expansion.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 expand_ratio,
                 **kwargs):
        super(XDenseUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        # DenseNet convention: bottleneck width = 4 x growth increment.
        bn_size = 4
        # inc_channels is the per-unit growth (channels appended by concat).
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size
        with self.name_scope():
            self.conv1 = pre_xconv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                expand_ratio=expand_ratio)
            self.conv2 = pre_xconv3x3_block(
                in_channels=mid_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats,
                expand_ratio=expand_ratio)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)

    def hybrid_forward(self, F, x):
        # Keep the input so the new features can be densely concatenated.
        identity = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_dropout:
            x = self.dropout(x)
        # Channel-wise concat: output has in_channels + inc_channels channels.
        x = F.concat(identity, x, dim=1)
        return x
class XDenseNet(HybridBlock):
    """
    X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    expand_ratio : int, default 2
        Ratio of expansion.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 dropout_rate=0.0,
                 expand_ratio=2,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(XDenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem block (imported helper, defined outside this chunk).
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    # Every stage after the first starts with a transition
                    # block that halves the channel count.
                    if i != 0:
                        stage.add(TransitionBlock(
                            in_channels=in_channels,
                            out_channels=(in_channels // 2),
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = in_channels // 2
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(XDenseUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            bn_use_global_stats=bn_use_global_stats,
                            dropout_rate=dropout_rate,
                            expand_ratio=expand_ratio))
                        in_channels = out_channels
                self.features.add(stage)
            # Final BN+ReLU before pooling (pre-activation architecture).
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # NOTE(review): fixed 7x7 pooling assumes 224x224 input; other
            # in_size values are not handled here.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_xdensenet(blocks,
                  expand_ratio=2,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create X-DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    expand_ratio : int, default 2
        Ratio of expansion.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if blocks == 121:
        init_block_channels, growth_rate, layers = 64, 32, [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels, growth_rate, layers = 96, 48, [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels, growth_rate, layers = 64, 32, [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels, growth_rate, layers = 64, 32, [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported X-DenseNet version with number of layers {}".format(blocks))

    # DenseNet channel bookkeeping: within a stage every unit appends
    # `growth_rate` channels; between stages a transition block halves the
    # channel count. The stem ends at init_block_channels * 2 channels.
    channels = []
    prev_channels = init_block_channels * 2
    for num_units in layers:
        base_channels = prev_channels // 2
        stage = [base_channels + growth_rate * (j + 1) for j in range(num_units)]
        channels.append(stage)
        prev_channels = stage[-1]

    net = XDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        expand_ratio=expand_ratio,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def xdensenet121_2(**kwargs):
    """
    X-DenseNet-121-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(
        blocks=121,
        model_name="xdensenet121_2",
        **kwargs)
def xdensenet161_2(**kwargs):
    """
    X-DenseNet-161-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(
        blocks=161,
        model_name="xdensenet161_2",
        **kwargs)
def xdensenet169_2(**kwargs):
    """
    X-DenseNet-169-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(
        blocks=169,
        model_name="xdensenet169_2",
        **kwargs)
def xdensenet201_2(**kwargs):
    """
    X-DenseNet-201-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
    https://arxiv.org/abs/1711.08757.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_xdensenet(
        blocks=201,
        model_name="xdensenet201_2",
        **kwargs)
def _test():
    """Smoke-test all X-DenseNet variants: parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Model factory -> expected trainable-parameter count.
    expected_counts = {
        xdensenet121_2: 7978856,
        xdensenet161_2: 28681000,
        xdensenet169_2: 14149480,
        xdensenet201_2: 20013928,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 19,138 | 31.94148 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diaresnet_cifar.py | """
DIA-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['CIFARDIAResNet', 'diaresnet20_cifar10', 'diaresnet20_cifar100', 'diaresnet20_svhn', 'diaresnet56_cifar10',
'diaresnet56_cifar100', 'diaresnet56_svhn', 'diaresnet110_cifar10', 'diaresnet110_cifar100',
'diaresnet110_svhn', 'diaresnet164bn_cifar10', 'diaresnet164bn_cifar100', 'diaresnet164bn_svhn',
'diaresnet1001_cifar10', 'diaresnet1001_cifar100', 'diaresnet1001_svhn', 'diaresnet1202_cifar10',
'diaresnet1202_cifar100', 'diaresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block, DualPathSequential
from .diaresnet import DIAAttention, DIAResUnit
class CIFARDIAResNet(HybridBlock):
    """
    DIA-ResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        # Fix: accept and forward HybridBlock kwargs (e.g. `prefix`, `params`),
        # like the other model classes in this package do.
        super(CIFARDIAResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = DualPathSequential(
                    return_two=False,
                    prefix="stage{}_".format(i + 1))
                # A single DIA attention module is created per stage and
                # shared by all units of that stage.
                attention = DIAAttention(
                    in_x_features=channels_per_stage[0],
                    in_h_features=channels_per_stage[0])
                for j, out_channels in enumerate(channels_per_stage):
                    # Downsample at the first unit of every non-first stage.
                    strides = 2 if (j == 0) and (i != 0) else 1
                    with stage.name_scope():
                        stage.add(DIAResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False,
                            attention=attention))
                        in_channels = out_channels
                self.features.add(stage)
            # 8x8 global pooling for 32x32 CIFAR inputs (after 2 downsamplings).
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_diaresnet_cifar(classes,
                        blocks,
                        bottleneck,
                        model_name=None,
                        pretrained=False,
                        ctx=cpu(),
                        root=os.path.join("~", ".mxnet", "models"),
                        **kwargs):
    """
    Create DIA-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # A bottleneck unit contains 3 convolutions, a simple unit 2; plus the
    # stem and classifier, hence (blocks - 2) split evenly over 3 stages.
    units_divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % units_divisor == 0)
    layers = [(blocks - 2) // units_divisor] * 3

    init_block_channels = 16
    width_multiplier = 4 if bottleneck else 1
    stage_widths = [16, 32, 64]
    channels = [[width * width_multiplier] * depth
                for width, depth in zip(stage_widths, layers)]

    net = CIFARDIAResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def diaresnet20_cifar10(classes=10, **kwargs):
    """
    DIA-ResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="diaresnet20_cifar10",
        **kwargs)
def diaresnet20_cifar100(classes=100, **kwargs):
    """
    DIA-ResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="diaresnet20_cifar100",
        **kwargs)
def diaresnet20_svhn(classes=10, **kwargs):
    """
    DIA-ResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="diaresnet20_svhn",
        **kwargs)
def diaresnet56_cifar10(classes=10, **kwargs):
    """
    DIA-ResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="diaresnet56_cifar10",
        **kwargs)
def diaresnet56_cifar100(classes=100, **kwargs):
    """
    DIA-ResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="diaresnet56_cifar100",
        **kwargs)
def diaresnet56_svhn(classes=10, **kwargs):
    """
    DIA-ResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="diaresnet56_svhn",
        **kwargs)
def diaresnet110_cifar10(classes=10, **kwargs):
    """
    DIA-ResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="diaresnet110_cifar10",
        **kwargs)
def diaresnet110_cifar100(classes=100, **kwargs):
    """
    DIA-ResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="diaresnet110_cifar100",
        **kwargs)
def diaresnet110_svhn(classes=10, **kwargs):
    """
    DIA-ResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="diaresnet110_svhn",
        **kwargs)
def diaresnet164bn_cifar10(classes=10, **kwargs):
    """
    DIA-ResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="diaresnet164bn_cifar10",
        **kwargs)
def diaresnet164bn_cifar100(classes=100, **kwargs):
    """
    DIA-ResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="diaresnet164bn_cifar100",
        **kwargs)
def diaresnet164bn_svhn(classes=10, **kwargs):
    """
    DIA-ResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="diaresnet164bn_svhn",
        **kwargs)
def diaresnet1001_cifar10(classes=10, **kwargs):
    """
    DIA-ResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="diaresnet1001_cifar10",
        **kwargs)
def diaresnet1001_cifar100(classes=100, **kwargs):
    """
    DIA-ResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="diaresnet1001_cifar100",
        **kwargs)
def diaresnet1001_svhn(classes=10, **kwargs):
    """
    DIA-ResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="diaresnet1001_svhn",
        **kwargs)
def diaresnet1202_cifar10(classes=10, **kwargs):
    """
    DIA-ResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="diaresnet1202_cifar10",
        **kwargs)
def diaresnet1202_cifar100(classes=100, **kwargs):
    """
    DIA-ResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="diaresnet1202_cifar100",
        **kwargs)
def diaresnet1202_svhn(classes=10, **kwargs):
    """
    DIA-ResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diaresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="diaresnet1202_svhn",
        **kwargs)
def _test():
    """Smoke-test all DIA-ResNet CIFAR/SVHN variants."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (model factory, number of classes, expected trainable-parameter count)
    cases = [
        (diaresnet20_cifar10, 10, 286866),
        (diaresnet20_cifar100, 100, 292716),
        (diaresnet20_svhn, 10, 286866),
        (diaresnet56_cifar10, 10, 870162),
        (diaresnet56_cifar100, 100, 876012),
        (diaresnet56_svhn, 10, 870162),
        (diaresnet110_cifar10, 10, 1745106),
        (diaresnet110_cifar100, 100, 1750956),
        (diaresnet110_svhn, 10, 1745106),
        (diaresnet164bn_cifar10, 10, 1923002),
        (diaresnet164bn_cifar100, 100, 1946132),
        (diaresnet164bn_svhn, 10, 1923002),
        (diaresnet1001_cifar10, 10, 10547450),
        (diaresnet1001_cifar100, 100, 10570580),
        (diaresnet1001_svhn, 10, 10547450),
        (diaresnet1202_cifar10, 10, 19438418),
        (diaresnet1202_cifar100, 100, 19444268),
        (diaresnet1202_svhn, 10, 19438418),
    ]

    for model, classes, expected in cases:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 21,836 | 36.137755 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resdropresnet_cifar.py | """
ResDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.
"""
__all__ = ['CIFARResDropResNet', 'resdropresnet20_cifar10', 'resdropresnet20_cifar100', 'resdropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ResDropResUnit(HybridBlock):
    """
    ResDrop-ResNet unit with residual connection and stochastic depth: during
    training the residual branch is randomly dropped with probability
    (1 - life_prob) and rescaled when kept; at inference the branch is always
    used unscaled.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_prob : float
        Residual branch life probability.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 life_prob,
                 **kwargs):
        super(ResDropResUnit, self).__init__(**kwargs)
        self.life_prob = life_prob
        # A 1x1 projection is needed whenever shape changes on the skip path.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        body_class = ResBottleneck if bottleneck else ResBlock
        with self.name_scope():
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        if mx.autograd.is_training():
            # Stochastic depth: keep the branch with prob life_prob; when
            # kept, scale by 1/life_prob (inverted-dropout convention) so the
            # inference path needs no rescaling.
            # NOTE(review): the Python-level `np.random` draw and the
            # `is_training()` check run once per imperative call; this block
            # presumably cannot be hybridized without baking in a fixed path —
            # confirm (the test routine keeps `net.hybridize()` commented out).
            b = np.random.binomial(n=1, p=self.life_prob)
            x = float(b) / self.life_prob * x
        x = x + identity
        x = self.activ(x)
        return x
class CIFARResDropResNet(HybridBlock):
    """
    ResDrop-ResNet model for CIFAR from 'Deep Networks with Stochastic Depth,' https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_probs : list of float
        Residual branch life probability for each unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 life_probs,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARResDropResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # k indexes life_probs across all units of all stages.
            k = 0
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every non-first stage.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ResDropResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            life_prob=life_probs[k]))
                        in_channels = out_channels
                        k += 1
                self.features.add(stage)
            # 8x8 global pooling for 32x32 CIFAR inputs.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resdropresnet_cifar(classes,
                            blocks,
                            bottleneck,
                            model_name=None,
                            pretrained=False,
                            ctx=cpu(),
                            root=os.path.join("~", ".mxnet", "models"),
                            **kwargs):
    """
    Create ResDrop-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Three stages, each with the same number of units; a bottleneck unit
    # contributes 3 conv layers, a simple unit 2 (plus stem and classifier).
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3

    init_block_channels = 16
    channels_per_layers = [16, 32, 64]
    channels = [[c] * n for (c, n) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units expose 4x the base channel count.
        channels = [[4 * c for c in stage] for stage in channels]

    # Linearly decaying survival probability, down to 0.5 at the last unit.
    total_layers = sum(layers)
    final_death_prob = 0.5
    life_probs = [1.0 - final_death_prob * float(u + 1) / float(total_layers)
                  for u in range(total_layers)]

    net = CIFARResDropResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        life_probs=life_probs,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def resdropresnet20_cifar10(classes=10, **kwargs):
    """
    ResDrop-ResNet-20 model for CIFAR-10 from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resdropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resdropresnet20_cifar10",
        **kwargs)
def resdropresnet20_cifar100(classes=100, **kwargs):
    """
    ResDrop-ResNet-20 model for CIFAR-100 from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resdropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resdropresnet20_cifar100",
        **kwargs)
def resdropresnet20_svhn(classes=10, **kwargs):
    """
    ResDrop-ResNet-20 model for SVHN from 'Deep Networks with Stochastic Depth,'
    https://arxiv.org/abs/1603.09382.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resdropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="resdropresnet20_svhn",
        **kwargs)
def _test():
    """Smoke-test the ResDrop-ResNet CIFAR/SVHN model factories."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        (resdropresnet20_cifar10, 10),
        (resdropresnet20_cifar100, 100),
        (resdropresnet20_svhn, 10),
    ]
    expected_counts = {
        resdropresnet20_cifar10: 272474,
        resdropresnet20_cifar100: 278324,
        resdropresnet20_svhn: 272474,
    }

    for model, classes in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only trainable parameters with a known shape.
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])

        x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (14, classes))
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    _test()
| 11,219 | 33.62963 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/bisenet.py | """
BiSeNet for CelebAMask-HQ, implemented in Gluon.
Original paper: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1808.00897.
"""
__all__ = ['BiSeNet', 'bisenet_resnet18_celebamaskhq']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .resnet import resnet18
class PyramidPoolingZeroBranch(HybridBlock):
    """
    Pyramid pooling zero branch: a global context vector upsampled back to the
    input resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    in_size : tuple of 2 int
        Spatial size of output image for the upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 in_size,
                 **kwargs):
        super(PyramidPoolingZeroBranch, self).__init__(**kwargs)
        self.in_size = in_size
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels)
            self.up = InterpolationBlock(scale_factor=None)

    def hybrid_forward(self, F, x):
        # When no static size was configured, upsample to the dynamic input size.
        out_size = x.shape[2:] if self.in_size is None else self.in_size
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        w = self.conv(w)
        return self.up(w, out_size)
class AttentionRefinementBlock(HybridBlock):
    """
    Attention refinement block: a 3x3 conv whose output is re-weighted by a
    sigmoid-gated channel attention vector.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(AttentionRefinementBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels)
            self.conv2 = conv1x1_block(
                in_channels=out_channels,
                out_channels=out_channels,
                activation=(lambda: nn.Activation("sigmoid")))

    def hybrid_forward(self, F, x):
        y = self.conv1(x)
        gate = F.contrib.AdaptiveAvgPooling2D(y, output_size=1)
        gate = self.conv2(gate)
        return y * gate
class PyramidPoolingMainBranch(HybridBlock):
    """
    Pyramid pooling main branch: attention-refine the input, add the coarser
    branch, then upsample and smooth with a 3x3 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : float
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 **kwargs):
        super(PyramidPoolingMainBranch, self).__init__(**kwargs)
        with self.name_scope():
            self.att = AttentionRefinementBlock(
                in_channels=in_channels,
                out_channels=out_channels)
            self.up = InterpolationBlock(
                scale_factor=scale_factor,
                bilinear=False)
            self.conv = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels)

    def hybrid_forward(self, F, x, y):
        z = self.att(x) + y
        z = self.up(z)
        return self.conv(z)
class FeatureFusion(HybridBlock):
    """
    Feature fusion block: concatenate two inputs, merge with a 1x1 conv and
    re-weight the result through a squeeze-style channel gate.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    reduction : int, default 4
        Squeeze reduction value.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 reduction=4,
                 **kwargs):
        super(FeatureFusion, self).__init__(**kwargs)
        mid_channels = out_channels // reduction
        with self.name_scope():
            self.conv_merge = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels)
            self.conv1 = conv1x1(
                in_channels=out_channels,
                out_channels=mid_channels)
            self.activ = nn.Activation("relu")
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels)
            self.sigmoid = nn.Activation("sigmoid")

    def hybrid_forward(self, F, x, y):
        merged = self.conv_merge(F.concat(x, y, dim=1))
        # Channel attention: squeeze -> reduce -> relu -> expand -> sigmoid.
        gate = F.contrib.AdaptiveAvgPooling2D(merged, output_size=1)
        gate = self.conv1(gate)
        gate = self.activ(gate)
        gate = self.conv2(gate)
        gate = self.sigmoid(gate)
        # Residual re-weighting: merged + merged * gate.
        return merged + merged * gate
class PyramidPooling(HybridBlock):
    """
    Pyramid Pooling module: combines backbone features taken at three
    resolutions through a zero branch, two main branches and a fusion block.

    Parameters:
    ----------
    x16_in_channels : int
        Number of input channels for x16.
    x32_in_channels : int
        Number of input channels for x32.
    y_out_channels : int
        Number of output channels for y-outputs.
    y32_out_size : tuple of 2 int
        Spatial size of the y32 tensor.
    """
    def __init__(self,
                 x16_in_channels,
                 x32_in_channels,
                 y_out_channels,
                 y32_out_size,
                 **kwargs):
        super(PyramidPooling, self).__init__(**kwargs)
        z_out_channels = 2 * y_out_channels
        with self.name_scope():
            self.pool32 = PyramidPoolingZeroBranch(
                in_channels=x32_in_channels,
                out_channels=y_out_channels,
                in_size=y32_out_size)
            self.pool16 = PyramidPoolingMainBranch(
                in_channels=x32_in_channels,
                out_channels=y_out_channels,
                scale_factor=2)
            self.pool8 = PyramidPoolingMainBranch(
                in_channels=x16_in_channels,
                out_channels=y_out_channels,
                scale_factor=2)
            self.fusion = FeatureFusion(
                in_channels=z_out_channels,
                out_channels=z_out_channels)

    def hybrid_forward(self, F, x8, x16, x32):
        out32 = self.pool32(x32)
        out16 = self.pool16(x32, out32)
        out8 = self.pool8(x16, out16)
        fused8 = self.fusion(x8, out8)
        return fused8, out8, out16
class BiSeHead(HybridBlock):
    """
    BiSeNet head (final) block: a 3x3 conv block followed by a 1x1 projection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 **kwargs):
        super(BiSeHead, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels)

    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class BiSeNet(HybridBlock):
    """
    BiSeNet model from 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1808.00897.

    Parameters:
    ----------
    backbone : func -> nn.Sequential
        Feature extractor.
    aux : bool, default True
        Whether to output auxiliary results.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (640, 480)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of classification classes.
    """
    def __init__(self,
                 backbone,
                 aux=True,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(640, 480),
                 classes=19,
                 **kwargs):
        super(BiSeNet, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.name_scope():
            # The backbone factory returns the trunk and the channel counts of
            # its three emitted feature maps (1/8, 1/16, 1/32 resolutions).
            self.backbone, backbone_out_channels = backbone()
            y_out_channels = backbone_out_channels[0]
            z_out_channels = 2 * y_out_channels
            # Static size of the deepest feature map; None when inputs vary.
            y32_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
            self.pool = PyramidPooling(
                x16_in_channels=backbone_out_channels[1],
                x32_in_channels=backbone_out_channels[2],
                y_out_channels=y_out_channels,
                y32_out_size=y32_out_size)
            # Main prediction head at 1/8 resolution, upsampled x8 to input size.
            self.head_z8 = BiSeHead(
                in_channels=z_out_channels,
                mid_channels=z_out_channels,
                out_channels=classes)
            self.up8 = InterpolationBlock(scale_factor=(8 if fixed_size else None))
            if self.aux:
                # Auxiliary heads on the intermediate 1/8 and 1/16 outputs
                # (deep-supervision style; only built when aux=True).
                mid_channels = y_out_channels // 2
                self.head_y8 = BiSeHead(
                    in_channels=y_out_channels,
                    mid_channels=mid_channels,
                    out_channels=classes)
                self.head_y16 = BiSeHead(
                    in_channels=y_out_channels,
                    mid_channels=mid_channels,
                    out_channels=classes)
                self.up16 = InterpolationBlock(scale_factor=(16 if fixed_size else None))

    def hybrid_forward(self, F, x):
        # NOTE(review): reads `x.shape`, so this path only works on concrete
        # NDArrays (not under hybridize()) — consistent with the test harness
        # keeping `net.hybridize()` commented out.
        assert (x.shape[2] % 32 == 0) and (x.shape[3] % 32 == 0)
        x8, x16, x32 = self.backbone(x)
        z8, y8, y16 = self.pool(x8, x16, x32)
        z8 = self.head_z8(z8)
        z8 = self.up8(z8)
        if self.aux:
            y8 = self.head_y8(y8)
            y16 = self.head_y16(y16)
            y8 = self.up8(y8)
            y16 = self.up16(y16)
            # Main output plus two auxiliary outputs at input resolution.
            return z8, y8, y16
        else:
            return z8
def get_bisenet(model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create BiSeNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = BiSeNet(**kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_file, ctx=ctx)

    return net
def bisenet_resnet18_celebamaskhq(pretrained_backbone=False, classes=19, **kwargs):
    """
    BiSeNet model on the base of ResNet-18 for face segmentation on CelebAMask-HQ from 'BiSeNet: Bilateral Segmentation
    Network for Real-time Semantic Segmentation,' https://arxiv.org/abs/1808.00897.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    def backbone(**bb_kwargs):
        # Reuse the ImageNet ResNet-18 trunk; `[:-1]` drops its last feature
        # stage (the final pooling) so only conv stages remain.
        features_raw = resnet18(pretrained=pretrained_backbone, **bb_kwargs).features[:-1]
        features = MultiOutputSequential(return_last=False)
        # First child is the stem/init block; it never emits an output.
        features.add(features_raw[0])
        for i, stage in enumerate(features_raw[1:]):
            if i != 0:
                # Every stage after the first emits its feature map, giving
                # the three multi-resolution outputs BiSeNet consumes.
                stage.do_output = True
            features.add(stage)
        # Channel counts of the emitted maps — presumably the 1/8, 1/16 and
        # 1/32 stages of ResNet-18; confirm against the trunk definition.
        out_channels = [128, 256, 512]
        return features, out_channels
    return get_bisenet(backbone=backbone, classes=classes, model_name="bisenet_resnet18_celebamaskhq", **kwargs)
def _test():
    """Smoke-test the BiSeNet model factory."""
    import numpy as np
    import mxnet as mx

    in_size = (640, 480)
    aux = True
    pretrained = False

    for model in [bisenet_resnet18_celebamaskhq]:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only trainable parameters with a known shape.
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        expected = 13300416 if aux else 13150272
        assert (model != bisenet_resnet18_celebamaskhq or weight_count == expected)

        batch = 1
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        assert (y.shape == (batch, 19, in_size[0], in_size[1]))
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    _test()
| 13,872 | 30.386878 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnet.py | """
ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'ResBlock', 'ResBottleneck',
'ResUnit', 'ResInitBlock', 'get_resnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, conv7x7_block
class ResBlock(HybridBlock):
    """
    Simple ResNet block: two stacked 3x3 conv blocks forming the residual
    branch of a ResNet unit (second conv is left without activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ResBlock, self).__init__(**kwargs)
        with self.name_scope():
            # First conv carries the (possible) downsampling stride.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Second conv is linear: ReLU is applied by the enclosing unit
            # after the identity addition.
            self.conv2 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)

    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class ResBottleneck(HybridBlock):
    """
    ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand residual branch
    of a ResNet unit (last conv is left without activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 conv1_stride=False,
                 bottleneck_factor=4,
                 **kwargs):
        super(ResBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // bottleneck_factor
        with self.name_scope():
            # The downsampling stride lives either on the 1x1 reduce conv
            # (conv1_stride=True) or on the 3x3 conv (the common "b" variant).
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=(strides if conv1_stride else 1),
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=(1 if conv1_stride else strides),
                padding=padding,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Expansion conv is linear: ReLU is applied by the enclosing unit.
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)

    def hybrid_forward(self, F, x):
        return self.conv3(self.conv2(self.conv1(x)))
class ResUnit(HybridBlock):
    """
    ResNet unit: residual branch (simple or bottleneck block) plus identity
    shortcut, followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 bottleneck=True,
                 conv1_stride=False,
                 **kwargs):
        super(ResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever channels or resolution change.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=padding,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    conv1_stride=conv1_stride)
            else:
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            if self.resize_identity:
                # 1x1 projection (no activation) to match the residual branch.
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_bias=use_bias,
                    use_bn=use_bn,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    activation=None)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.activ(self.body(x) + identity)
class ResInitBlock(HybridBlock):
    """
    ResNet stem: a stride-2 7x7 conv block followed by stride-2 3x3 max
    pooling (overall 4x downsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ResInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = conv7x7_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)

    def hybrid_forward(self, F, x):
        return self.pool(self.conv(x))
class ResNet(HybridBlock):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ResNet, self).__init__(**kwargs)
        # Stored for downstream consumers that query input size / class count.
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 7x7 conv + max pool (4x spatial reduction).
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                # Stage prefixes feed into parameter names; keep them stable
                # so pretrained weight files keep loading.
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first stage.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pooling matches the 224x224 input reduced 32x.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resnet(blocks,
               bottleneck=None,
               conv1_stride=True,
               width_scale=1.0,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # Original paper convention: bottleneck units from ResNet-50 upward.
        bottleneck = (blocks >= 50)

    # Units per stage for the depths that do not depend on `bottleneck`.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    else:
        raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))

    # Sanity check: layer counts must reproduce the requested depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units expose 4x the base channel count.
        channels_per_layers = [4 * c for c in channels_per_layers]
    channels = [[c] * n for (c, n) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit's width except the very last unit of the last
        # stage, which keeps its original channel count.
        scaled = []
        for i, stage in enumerate(channels):
            row = []
            for j, c in enumerate(stage):
                is_final_unit = (i == len(channels) - 1) and (j == len(stage) - 1)
                row.append(c if is_final_unit else int(c * width_scale))
            scaled.append(row)
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)

    net = ResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def resnet10(**kwargs):
    """
    ResNet-10 model (experimental) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=10,
        model_name="resnet10",
        **kwargs)
def resnet12(**kwargs):
    """
    ResNet-12 model (experimental) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=12,
        model_name="resnet12",
        **kwargs)
def resnet14(**kwargs):
    """
    ResNet-14 model (experimental) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=14,
        model_name="resnet14",
        **kwargs)
def resnetbc14b(**kwargs):
    """
    ResNet-BC-14b model (experimental, bottleneck compressed, stride on the 3x3 conv)
    from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=14,
        bottleneck=True,
        conv1_stride=False,
        model_name="resnetbc14b",
        **kwargs)
def resnet16(**kwargs):
    """
    ResNet-16 model (experimental) from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=16,
        model_name="resnet16",
        **kwargs)
def resnet18_wd4(**kwargs):
    """
    ResNet-18 model with 0.25 width scale (experimental) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=18,
        width_scale=0.25,
        model_name="resnet18_wd4",
        **kwargs)
def resnet18_wd2(**kwargs):
    """
    ResNet-18 model with 0.5 width scale (experimental) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=18,
        width_scale=0.5,
        model_name="resnet18_wd2",
        **kwargs)
def resnet18_w3d4(**kwargs):
    """
    ResNet-18 model with 0.75 width scale (experimental) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=18,
        width_scale=0.75,
        model_name="resnet18_w3d4",
        **kwargs)
def resnet18(**kwargs):
    """
    ResNet-18 model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=18,
        model_name="resnet18",
        **kwargs)
def resnet26(**kwargs):
    """
    ResNet-26 model (experimental, simple blocks) from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location of the model parameter files.
    """
    return get_resnet(
        blocks=26,
        bottleneck=False,
        model_name="resnet26",
        **kwargs)
def resnetbc26b(**kwargs):
    """
    Build ResNet-BC-26b, a bottleneck-compressed experimental variant with the stride moved to
    the second convolution ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)
def resnet34(**kwargs):
    """
    Build the standard ResNet-34 network ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet34", blocks=34, **kwargs)
def resnetbc38b(**kwargs):
    """
    Build ResNet-BC-38b, a bottleneck-compressed experimental variant with the stride moved to
    the second convolution ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnetbc38b", blocks=38, bottleneck=True, conv1_stride=False, **kwargs)
def resnet50(**kwargs):
    """
    Build the standard ResNet-50 network ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet50", blocks=50, **kwargs)
def resnet50b(**kwargs):
    """
    Build ResNet-50b, the variant with the stride at the second convolution of the bottleneck
    block ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet50b", blocks=50, conv1_stride=False, **kwargs)
def resnet101(**kwargs):
    """
    Build the standard ResNet-101 network ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet101", blocks=101, **kwargs)
def resnet101b(**kwargs):
    """
    Build ResNet-101b, the variant with the stride at the second convolution of the bottleneck
    block ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet101b", blocks=101, conv1_stride=False, **kwargs)
def resnet152(**kwargs):
    """
    Build the standard ResNet-152 network ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet152", blocks=152, **kwargs)
def resnet152b(**kwargs):
    """
    Build ResNet-152b, the variant with the stride at the second convolution of the bottleneck
    block ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet152b", blocks=152, conv1_stride=False, **kwargs)
def resnet200(**kwargs):
    """
    Build ResNet-200 ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). Experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet200", blocks=200, **kwargs)
def resnet200b(**kwargs):
    """
    Build ResNet-200b, the variant with the stride at the second convolution of the bottleneck
    block ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="resnet200b", blocks=200, conv1_stride=False, **kwargs)
def _test():
    """Smoke-test every ResNet variant: build, count parameters, run one forward pass."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        resnet10,
        resnet12,
        resnet14,
        resnetbc14b,
        resnet16,
        resnet18_wd4,
        resnet18_wd2,
        resnet18_w3d4,
        resnet18,
        resnet26,
        resnetbc26b,
        resnet34,
        resnetbc38b,
        resnet50,
        resnet50b,
        resnet101,
        resnet101b,
        resnet152,
        resnet152b,
        resnet200,
        resnet200b,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only trainable parameters with a known shape.
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Regression checks: parameter counts pinned per variant.
        assert (model != resnet10 or weight_count == 5418792)
        assert (model != resnet12 or weight_count == 5492776)
        assert (model != resnet14 or weight_count == 5788200)
        assert (model != resnetbc14b or weight_count == 10064936)
        assert (model != resnet16 or weight_count == 6968872)
        assert (model != resnet18_wd4 or weight_count == 3937400)
        assert (model != resnet18_wd2 or weight_count == 5804296)
        assert (model != resnet18_w3d4 or weight_count == 8476056)
        assert (model != resnet18 or weight_count == 11689512)
        assert (model != resnet26 or weight_count == 17960232)
        assert (model != resnetbc26b or weight_count == 15995176)
        assert (model != resnet34 or weight_count == 21797672)
        assert (model != resnetbc38b or weight_count == 21925416)
        assert (model != resnet50 or weight_count == 25557032)
        assert (model != resnet50b or weight_count == 25557032)
        assert (model != resnet101 or weight_count == 44549160)
        assert (model != resnet101b or weight_count == 44549160)
        assert (model != resnet152 or weight_count == 60192808)
        assert (model != resnet152b or weight_count == 60192808)
        assert (model != resnet200 or weight_count == 64673832)
        assert (model != resnet200b or weight_count == 64673832)
        # Single forward pass at the canonical 224x224 ImageNet resolution.
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 30,943 | 34.123723 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/simpleposemobile_coco.py | """
SimplePose(Mobile) for COCO Keypoint, implemented in Gluon.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePoseMobile', 'simplepose_mobile_resnet18_coco', 'simplepose_mobile_resnet50b_coco',
'simplepose_mobile_mobilenet_w1_coco', 'simplepose_mobile_mobilenetv2b_w1_coco',
'simplepose_mobile_mobilenetv3_small_w1_coco', 'simplepose_mobile_mobilenetv3_large_w1_coco']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, DucBlock, HeatmapMaxDetBlock
from .resnet import resnet18, resnet50b
from .mobilenet import mobilenet_w1
from .mobilenetv2 import mobilenetv2b_w1
from .mobilenetv3 import mobilenetv3_small_w1, mobilenetv3_large_w1
class SimplePoseMobile(HybridBlock):
    """
    SimplePose(Mobile) model from 'Simple Baselines for Human Pose Estimation and Tracking,'
    https://arxiv.org/abs/1804.06208.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    decoder_init_block_channels : int
        Number of output channels for the initial unit of the decoder.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 decoder_init_block_channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=True,
                 return_heatmap=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 **kwargs):
        super(SimplePoseMobile, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        with self.name_scope():
            self.backbone = backbone
            # Decoder: 1x1 projection, a chain of x2 DUC upsampling blocks,
            # then a 1x1 conv producing one heatmap per keypoint.
            self.decoder = nn.HybridSequential(prefix="")
            in_channels = backbone_out_channels
            self.decoder.add(conv1x1(
                in_channels=in_channels,
                out_channels=decoder_init_block_channels))
            in_channels = decoder_init_block_channels
            for out_channels in channels:
                self.decoder.add(DucBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    scale_factor=2,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            self.decoder.add(conv1x1(
                in_channels=in_channels,
                out_channels=keypoints))
            # Heatmaps come out at 1/4 of the input resolution, hence in_size // 4.
            self.heatmap_max_det = HeatmapMaxDetBlock(
                channels=keypoints,
                in_size=(in_size[0] // 4, in_size[1] // 4),
                fixed_size=fixed_size)
    def hybrid_forward(self, F, x):
        x = self.backbone(x)
        heatmap = self.decoder(x)
        if self.return_heatmap:
            return heatmap
        else:
            # Decode per-keypoint (x, y, score) from the heatmap maxima.
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_simpleposemobile(backbone,
                         backbone_out_channels,
                         keypoints,
                         bn_cudnn_off,
                         model_name=None,
                         pretrained=False,
                         ctx=cpu(),
                         root=os.path.join("~", ".mxnet", "models"),
                         **kwargs):
    """
    Assemble a SimplePose(Mobile) network around a given encoder.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    bn_cudnn_off : bool
        If True, disable the CUDNN batch normalization operator.
    model_name : str or None, default None
        Model name used to look up pretrained weights.
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Decoder widths are fixed for all SimplePose(Mobile) variants.
    decoder_channels = [128, 64, 32]
    decoder_stem_channels = 256
    model = SimplePoseMobile(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        decoder_init_block_channels=decoder_stem_channels,
        bn_cudnn_off=bn_cudnn_off,
        keypoints=keypoints,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        model.load_parameters(filename=weights_path, ctx=ctx)
    return model
def simplepose_mobile_resnet18_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    Build a SimplePose(Mobile) COCO keypoint model with a ResNet-18 encoder
    ('Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        If True, load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        If True, disable the CUDNN batch normalization operator.
    pretrained : bool, default False
        If True, load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Drop the final stage of the classifier's feature trunk.
    encoder = resnet18(pretrained=pretrained_backbone).features[:-1]
    return get_simpleposemobile(backbone=encoder, backbone_out_channels=512, keypoints=keypoints,
                                bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_resnet18_coco", **kwargs)
def simplepose_mobile_resnet50b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    Build a SimplePose(Mobile) COCO keypoint model with a ResNet-50b encoder
    ('Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        If True, load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        If True, disable the CUDNN batch normalization operator.
    pretrained : bool, default False
        If True, load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Drop the final stage of the classifier's feature trunk.
    encoder = resnet50b(pretrained=pretrained_backbone).features[:-1]
    return get_simpleposemobile(backbone=encoder, backbone_out_channels=2048, keypoints=keypoints,
                                bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_resnet50b_coco", **kwargs)
def simplepose_mobile_mobilenet_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    Build a SimplePose(Mobile) COCO keypoint model with a 1.0 MobileNet-224 encoder
    ('Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        If True, load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        If True, disable the CUDNN batch normalization operator.
    pretrained : bool, default False
        If True, load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Drop the final stage of the classifier's feature trunk.
    encoder = mobilenet_w1(pretrained=pretrained_backbone).features[:-1]
    return get_simpleposemobile(backbone=encoder, backbone_out_channels=1024, keypoints=keypoints,
                                bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenet_w1_coco", **kwargs)
def simplepose_mobile_mobilenetv2b_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    Build a SimplePose(Mobile) COCO keypoint model with a 1.0 MobileNetV2b-224 encoder
    ('Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        If True, load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        If True, disable the CUDNN batch normalization operator.
    pretrained : bool, default False
        If True, load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Drop the final stage of the classifier's feature trunk.
    encoder = mobilenetv2b_w1(pretrained=pretrained_backbone).features[:-1]
    return get_simpleposemobile(backbone=encoder, backbone_out_channels=1280, keypoints=keypoints,
                                bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenetv2b_w1_coco", **kwargs)
def simplepose_mobile_mobilenetv3_small_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    Build a SimplePose(Mobile) COCO keypoint model with a MobileNetV3 Small 224/1.0 encoder
    ('Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        If True, load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        If True, disable the CUDNN batch normalization operator.
    pretrained : bool, default False
        If True, load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Drop the final stage of the classifier's feature trunk.
    encoder = mobilenetv3_small_w1(pretrained=pretrained_backbone).features[:-1]
    return get_simpleposemobile(backbone=encoder, backbone_out_channels=576, keypoints=keypoints,
                                bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenetv3_small_w1_coco",
                                **kwargs)
def simplepose_mobile_mobilenetv3_large_w1_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    Build a SimplePose(Mobile) COCO keypoint model with a MobileNetV3 Large 224/1.0 encoder
    ('Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208).
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        If True, load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        If True, disable the CUDNN batch normalization operator.
    pretrained : bool, default False
        If True, load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    # Drop the final stage of the classifier's feature trunk.
    encoder = mobilenetv3_large_w1(pretrained=pretrained_backbone).features[:-1]
    return get_simpleposemobile(backbone=encoder, backbone_out_channels=960, keypoints=keypoints,
                                bn_cudnn_off=bn_cudnn_off, model_name="simplepose_mobile_mobilenetv3_large_w1_coco",
                                **kwargs)
def _test():
    """Smoke-test every SimplePose(Mobile) variant: build, count parameters, run one forward pass."""
    import numpy as np
    import mxnet as mx
    in_size = (256, 192)
    keypoints = 17
    return_heatmap = True
    pretrained = False
    models = [
        simplepose_mobile_resnet18_coco,
        simplepose_mobile_resnet50b_coco,
        simplepose_mobile_mobilenet_w1_coco,
        simplepose_mobile_mobilenetv2b_w1_coco,
        simplepose_mobile_mobilenetv3_small_w1_coco,
        simplepose_mobile_mobilenetv3_large_w1_coco,
    ]
    for model in models:
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        # Count only trainable parameters with a known shape.
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Regression checks: parameter counts pinned per variant.
        assert (model != simplepose_mobile_resnet18_coco or weight_count == 12858208)
        assert (model != simplepose_mobile_resnet50b_coco or weight_count == 25582944)
        assert (model != simplepose_mobile_mobilenet_w1_coco or weight_count == 5019744)
        # assert (model != simplepose_mobile_mobilenetv2b_w1_coco or weight_count == 4102176)
        assert (model != simplepose_mobile_mobilenetv3_small_w1_coco or weight_count == 2625088)
        assert (model != simplepose_mobile_mobilenetv3_large_w1_coco or weight_count == 4768336)
        batch = 14
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        assert ((y.shape[0] == batch) and (y.shape[1] == keypoints))
        if return_heatmap:
            # Heatmap output is at 1/4 of the input resolution.
            assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4))
        else:
            # Keypoint output: (x, y, score) per keypoint.
            assert (y.shape[2] == 3)
if __name__ == "__main__":
    _test()
| 15,126 | 40.217984 | 121 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/cbamresnet.py | """
CBAM-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
"""
__all__ = ['CbamResNet', 'cbam_resnet18', 'cbam_resnet34', 'cbam_resnet50', 'cbam_resnet101', 'cbam_resnet152']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv7x7_block
from .resnet import ResInitBlock, ResBlock, ResBottleneck
class MLP(HybridBlock):
    """
    Multilayer perceptron block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 **kwargs):
        super(MLP, self).__init__(**kwargs)
        # Bottleneck width of the two-layer perceptron.
        mid_channels = channels // reduction_ratio
        with self.name_scope():
            self.flatten = nn.Flatten()
            self.fc1 = nn.Dense(
                units=mid_channels,
                in_units=channels)
            self.activ = nn.Activation("relu")
            self.fc2 = nn.Dense(
                units=channels,
                in_units=mid_channels)
    def hybrid_forward(self, F, x):
        # Input is a pooled (N, C, 1, 1) descriptor; flatten to (N, C) for the dense layers.
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.activ(x)
        x = self.fc2(x)
        return x
class ChannelGate(HybridBlock):
    """
    CBAM channel gate block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 **kwargs):
        super(ChannelGate, self).__init__(**kwargs)
        with self.name_scope():
            self.avg_pool = nn.GlobalAvgPool2D()
            self.max_pool = nn.GlobalMaxPool2D()
            # A single MLP is shared between the average- and max-pooled descriptors.
            self.mlp = MLP(
                channels=channels,
                reduction_ratio=reduction_ratio)
            self.sigmoid = nn.Activation("sigmoid")
    def hybrid_forward(self, F, x):
        att1 = self.avg_pool(x)
        att1 = self.mlp(att1)
        att2 = self.max_pool(x)
        att2 = self.mlp(att2)
        # Fuse both descriptors by addition before the sigmoid gating.
        att = att1 + att2
        att = self.sigmoid(att)
        # Restore (N, C) -> (N, C, 1, 1) and broadcast the gate over the spatial dims.
        att = att.expand_dims(2).expand_dims(3).broadcast_like(x)
        x = x * att
        return x
class SpatialGate(HybridBlock):
    """
    CBAM spatial gate block.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(SpatialGate, self).__init__(**kwargs)
        with self.name_scope():
            # 7x7 conv collapses the 2-channel (max, mean) map to a single attention map.
            self.conv = conv7x7_block(
                in_channels=2,
                out_channels=1,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            self.sigmoid = nn.Activation("sigmoid")
    def hybrid_forward(self, F, x):
        # Channel-wise max and mean maps, each of shape (N, 1, H, W).
        att1 = x.max(axis=1).expand_dims(1)
        att2 = x.mean(axis=1).expand_dims(1)
        att = F.concat(att1, att2, dim=1)
        att = self.conv(att)
        # Broadcast the single-channel spatial gate across all channels.
        att = self.sigmoid(att).broadcast_like(x)
        x = x * att
        return x
class CbamBlock(HybridBlock):
    """
    CBAM attention block for CBAM-ResNet.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 channels,
                 reduction_ratio=16,
                 bn_use_global_stats=False,
                 **kwargs):
        super(CbamBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.ch_gate = ChannelGate(
                channels=channels,
                reduction_ratio=reduction_ratio)
            self.sp_gate = SpatialGate(bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        # CBAM applies channel attention first, then spatial attention.
        x = self.ch_gate(x)
        x = self.sp_gate(x)
        return x
class CbamResUnit(HybridBlock):
    """
    CBAM-ResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 **kwargs):
        super(CbamResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=False)
            else:
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.cbam = CbamBlock(channels=out_channels)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # CBAM refines the residual branch before the identity addition.
        x = self.cbam(x)
        x = x + identity
        x = self.activ(x)
        return x
class CbamResNet(HybridBlock):
    """
    CBAM-ResNet model from 'CBAM: Convolutional Block Attention Module,' https://arxiv.org/abs/1807.06521.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(CbamResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of each stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(CbamResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck))
                        in_channels = out_channels
                self.features.add(stage)
            # Fixed 7x7 average pooling assumes a 224x224 input (7x7 final feature map).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create CBAM-ResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 18, 34, 50, 101, 152).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for each supported depth.
    if blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    else:
        raise ValueError("Unsupported CBAM-ResNet with number of blocks: {}".format(blocks))
    init_block_channels = 64
    # ResNet-18/34 use basic blocks; ResNet-50+ use (4x wider) bottleneck blocks.
    if blocks < 50:
        channels_per_layers = [64, 128, 256, 512]
        bottleneck = False
    else:
        channels_per_layers = [256, 512, 1024, 2048]
        bottleneck = True
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = CbamResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def cbam_resnet18(**kwargs):
    """
    Build CBAM-ResNet-18 ('CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="cbam_resnet18", blocks=18, **kwargs)
def cbam_resnet34(**kwargs):
    """
    Build CBAM-ResNet-34 ('CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="cbam_resnet34", blocks=34, **kwargs)
def cbam_resnet50(**kwargs):
    """
    Build CBAM-ResNet-50 ('CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="cbam_resnet50", blocks=50, **kwargs)
def cbam_resnet101(**kwargs):
    """
    Build CBAM-ResNet-101 ('CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="cbam_resnet101", blocks=101, **kwargs)
def cbam_resnet152(**kwargs):
    """
    Build CBAM-ResNet-152 ('CBAM: Convolutional Block Attention Module,'
    https://arxiv.org/abs/1807.06521).
    Parameters:
    ----------
    pretrained : bool, default False
        If True, load pretrained weights.
    ctx : Context, default CPU
        Context used when loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where model parameters are cached.
    """
    return get_resnet(model_name="cbam_resnet152", blocks=152, **kwargs)
def _test():
    """Smoke-test CBAM-ResNet variants: build, count parameters, run one forward pass."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        # cbam_resnet18,
        # cbam_resnet34,
        cbam_resnet50,
        # cbam_resnet101,
        # cbam_resnet152,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        # Count only trainable parameters with a known shape.
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Regression checks: parameter counts pinned per variant.
        assert (model != cbam_resnet18 or weight_count == 11779392)
        assert (model != cbam_resnet34 or weight_count == 21960468)
        assert (model != cbam_resnet50 or weight_count == 28089624)
        assert (model != cbam_resnet101 or weight_count == 49330172)
        assert (model != cbam_resnet152 or weight_count == 66826848)
        x = mx.nd.zeros((2, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (2, 1000))
if __name__ == "__main__":
    _test()
| 15,289 | 30.987448 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diracnetv2.py | """
DiracNetV2 for ImageNet-1K, implemented in Gluon.
Original paper: 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
https://arxiv.org/abs/1706.00388.
"""
__all__ = ['DiracNetV2', 'diracnet18v2', 'diracnet34v2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class DiracConv(HybridBlock):
    """
    DiracNetV2 specific convolution block with pre-activation.

    Applies ReLU first and the convolution second (pre-activation ordering).
    The convolution carries a bias and there is no batch normalization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 **kwargs):
        super(DiracConv, self).__init__(**kwargs)
        with self.name_scope():
            self.activ = nn.Activation("relu")
            # Plain biased convolution; presumably the Dirac parameterization of
            # the paper is folded into these weights at deploy time -- TODO confirm.
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=True,
                in_channels=in_channels)

    def hybrid_forward(self, F, x):
        # Pre-activation: ReLU before the convolution.
        x = self.activ(x)
        x = self.conv(x)
        return x
def dirac_conv3x3(in_channels, out_channels, **kwargs):
    """
    Build a DiracConv with a 3x3 kernel, unit stride and unit padding
    (spatial-size-preserving).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    return DiracConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=1,
        padding=1,
        **kwargs)
class DiracInitBlock(HybridBlock):
    """
    DiracNetV2 specific initial block.

    A 7x7 stride-2 biased convolution followed by a 3x3 stride-2 max-pool,
    giving an overall 4x spatial downsampling of the input image.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(DiracInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=7,
                strides=2,
                padding=3,
                use_bias=True,
                in_channels=in_channels)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.pool(x)
        return x
class DiracNetV2(HybridBlock):
    """
    DiracNetV2 model from 'DiracNets: Training Very Deep Neural Networks Without Skip-Connections,'
    https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DiracNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(DiracInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(dirac_conv3x3(
                            in_channels=in_channels,
                            out_channels=out_channels))
                        in_channels = out_channels
                    # Downsample by 2 between stages, but not after the last one.
                    if i != len(channels) - 1:
                        stage.add(nn.MaxPool2D(
                            pool_size=2,
                            strides=2,
                            padding=0))
                self.features.add(stage)
            # Units are pre-activated, so the last conv output is still linear;
            # apply the final ReLU here, then 7x7 average pooling (224x224 input).
            self.features.add(nn.Activation("relu"))
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_diracnetv2(blocks,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create DiracNetV2 model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported depths.
    depth_to_layers = {
        18: [4, 4, 4, 4],
        34: [6, 8, 12, 6],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported DiracNetV2 with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]

    init_block_channels = 64
    stage_widths = [64, 128, 256, 512]
    channels = [[width] * depth for (width, depth) in zip(stage_widths, layers)]

    net = DiracNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def diracnet18v2(**kwargs):
    """
    Build the 18-layer DiracNetV2 from 'DiracNets: Training Very Deep Neural Networks
    Without Skip-Connections,' https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diracnetv2(model_name="diracnet18v2", blocks=18, **kwargs)
def diracnet34v2(**kwargs):
    """
    Build the 34-layer DiracNetV2 from 'DiracNets: Training Very Deep Neural Networks
    Without Skip-Connections,' https://arxiv.org/abs/1706.00388.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diracnetv2(model_name="diracnet34v2", blocks=34, **kwargs)
def _test():
    """Smoke-test both DiracNetV2 variants: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Reference trainable-parameter counts.
    expected_counts = {
        diracnet18v2: 11511784,
        diracnet34v2: 21616232,
    }

    for model in (diracnet18v2, diracnet34v2):
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])

        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 9,008 | 29.03 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sepreresnet_cifar.py | """
SE-PreResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEPreResNet', 'sepreresnet20_cifar10', 'sepreresnet20_cifar100', 'sepreresnet20_svhn',
'sepreresnet56_cifar10', 'sepreresnet56_cifar100', 'sepreresnet56_svhn',
'sepreresnet110_cifar10', 'sepreresnet110_cifar100', 'sepreresnet110_svhn',
'sepreresnet164bn_cifar10', 'sepreresnet164bn_cifar100', 'sepreresnet164bn_svhn',
'sepreresnet272bn_cifar10', 'sepreresnet272bn_cifar100', 'sepreresnet272bn_svhn',
'sepreresnet542bn_cifar10', 'sepreresnet542bn_cifar100', 'sepreresnet542bn_svhn',
'sepreresnet1001_cifar10', 'sepreresnet1001_cifar100', 'sepreresnet1001_svhn',
'sepreresnet1202_cifar10', 'sepreresnet1202_cifar100', 'sepreresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .sepreresnet import SEPreResUnit
class CIFARSEPreResNet(HybridBlock):
    """
    SE-PreResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    A 3x3 conv stem, the given stages of SEPreResUnit blocks, an 8x8 average
    pool (matching the 32x32 input) and a dense classifier.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARSEPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SEPreResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False))
                        in_channels = out_channels
                self.features.add(stage)
            # 8x8 average pool: global pooling for the expected 32x32 input
            # after two 2x downsamplings.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_sepreresnet_cifar(classes,
                          blocks,
                          bottleneck,
                          model_name=None,
                          pretrained=False,
                          ctx=cpu(),
                          root=os.path.join("~", ".mxnet", "models"),
                          **kwargs):
    """
    Create SE-PreResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Three stages; depth determines units per stage (9 layers per bottleneck
    # unit, 6 per simple unit, plus 2 for the stem and classifier).
    unit_depth = 9 if bottleneck else 6
    assert ((blocks - 2) % unit_depth == 0)
    layers = [(blocks - 2) // unit_depth] * 3

    init_block_channels = 16
    channels = [[width] * depth for (width, depth) in zip([16, 32, 64], layers)]
    if bottleneck:
        # Bottleneck units expand the output width by 4.
        channels = [[4 * c for c in stage] for stage in channels]

    net = CIFARSEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def sepreresnet20_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-20 for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=20, bottleneck=False,
        model_name="sepreresnet20_cifar10", **kwargs)


def sepreresnet20_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-20 for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=20, bottleneck=False,
        model_name="sepreresnet20_cifar100", **kwargs)


def sepreresnet20_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-20 for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=20, bottleneck=False,
        model_name="sepreresnet20_svhn", **kwargs)


def sepreresnet56_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-56 for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=56, bottleneck=False,
        model_name="sepreresnet56_cifar10", **kwargs)


def sepreresnet56_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-56 for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=56, bottleneck=False,
        model_name="sepreresnet56_cifar100", **kwargs)


def sepreresnet56_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-56 for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=56, bottleneck=False,
        model_name="sepreresnet56_svhn", **kwargs)


def sepreresnet110_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-110 for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=110, bottleneck=False,
        model_name="sepreresnet110_cifar10", **kwargs)


def sepreresnet110_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-110 for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=110, bottleneck=False,
        model_name="sepreresnet110_cifar100", **kwargs)


def sepreresnet110_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-110 for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=110, bottleneck=False,
        model_name="sepreresnet110_svhn", **kwargs)


def sepreresnet164bn_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-164(BN) for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=164, bottleneck=True,
        model_name="sepreresnet164bn_cifar10", **kwargs)


def sepreresnet164bn_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-164(BN) for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=164, bottleneck=True,
        model_name="sepreresnet164bn_cifar100", **kwargs)


def sepreresnet164bn_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-164(BN) for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=164, bottleneck=True,
        model_name="sepreresnet164bn_svhn", **kwargs)


def sepreresnet272bn_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-272(BN) for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=272, bottleneck=True,
        model_name="sepreresnet272bn_cifar10", **kwargs)


def sepreresnet272bn_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-272(BN) for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=272, bottleneck=True,
        model_name="sepreresnet272bn_cifar100", **kwargs)


def sepreresnet272bn_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-272(BN) for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=272, bottleneck=True,
        model_name="sepreresnet272bn_svhn", **kwargs)


def sepreresnet542bn_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-542(BN) for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=542, bottleneck=True,
        model_name="sepreresnet542bn_cifar10", **kwargs)


def sepreresnet542bn_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-542(BN) for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=542, bottleneck=True,
        model_name="sepreresnet542bn_cifar100", **kwargs)


def sepreresnet542bn_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-542(BN) for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=542, bottleneck=True,
        model_name="sepreresnet542bn_svhn", **kwargs)


def sepreresnet1001_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-1001 for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=1001, bottleneck=True,
        model_name="sepreresnet1001_cifar10", **kwargs)


def sepreresnet1001_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-1001 for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=1001, bottleneck=True,
        model_name="sepreresnet1001_cifar100", **kwargs)


def sepreresnet1001_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-1001 for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=1001, bottleneck=True,
        model_name="sepreresnet1001_svhn", **kwargs)


def sepreresnet1202_cifar10(classes=10, **kwargs):
    """
    SE-PreResNet-1202 for CIFAR-10 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=1202, bottleneck=False,
        model_name="sepreresnet1202_cifar10", **kwargs)


def sepreresnet1202_cifar100(classes=100, **kwargs):
    """
    SE-PreResNet-1202 for CIFAR-100 ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 100) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=1202, bottleneck=False,
        model_name="sepreresnet1202_cifar100", **kwargs)


def sepreresnet1202_svhn(classes=10, **kwargs):
    """
    SE-PreResNet-1202 for SVHN ('Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507).
    Accepts `classes` (default 10) plus the `pretrained`, `ctx` and `root` options of `get_sepreresnet_cifar`.
    """
    return get_sepreresnet_cifar(
        classes=classes, blocks=1202, bottleneck=False,
        model_name="sepreresnet1202_svhn", **kwargs)
def _test():
    """Smoke-test all SE-PreResNet CIFAR/SVHN variants: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes) for every variant.
    models = [
        (sepreresnet20_cifar10, 10),
        (sepreresnet20_cifar100, 100),
        (sepreresnet20_svhn, 10),
        (sepreresnet56_cifar10, 10),
        (sepreresnet56_cifar100, 100),
        (sepreresnet56_svhn, 10),
        (sepreresnet110_cifar10, 10),
        (sepreresnet110_cifar100, 100),
        (sepreresnet110_svhn, 10),
        (sepreresnet164bn_cifar10, 10),
        (sepreresnet164bn_cifar100, 100),
        (sepreresnet164bn_svhn, 10),
        (sepreresnet272bn_cifar10, 10),
        (sepreresnet272bn_cifar100, 100),
        (sepreresnet272bn_svhn, 10),
        (sepreresnet542bn_cifar10, 10),
        (sepreresnet542bn_cifar100, 100),
        (sepreresnet542bn_svhn, 10),
        (sepreresnet1001_cifar10, 10),
        (sepreresnet1001_cifar100, 100),
        (sepreresnet1001_svhn, 10),
        (sepreresnet1202_cifar10, 10),
        (sepreresnet1202_cifar100, 100),
        (sepreresnet1202_svhn, 10),
    ]

    # Reference trainable-parameter counts.
    expected_counts = {
        sepreresnet20_cifar10: 274559,
        sepreresnet20_cifar100: 280409,
        sepreresnet20_svhn: 274559,
        sepreresnet56_cifar10: 862601,
        sepreresnet56_cifar100: 868451,
        sepreresnet56_svhn: 862601,
        sepreresnet110_cifar10: 1744664,
        sepreresnet110_cifar100: 1750514,
        sepreresnet110_svhn: 1744664,
        sepreresnet164bn_cifar10: 1904882,
        sepreresnet164bn_cifar100: 1928012,
        sepreresnet164bn_svhn: 1904882,
        sepreresnet272bn_cifar10: 3152450,
        sepreresnet272bn_cifar100: 3175580,
        sepreresnet272bn_svhn: 3152450,
        sepreresnet542bn_cifar10: 6271370,
        sepreresnet542bn_cifar100: 6294500,
        sepreresnet542bn_svhn: 6271370,
        sepreresnet1001_cifar10: 11573534,
        sepreresnet1001_cifar100: 11596664,
        sepreresnet1001_svhn: 11573534,
        sepreresnet1202_cifar10: 19581938,
        sepreresnet1202_cifar100: 19587788,
        sepreresnet1202_svhn: 19581938,
    }

    for model, classes in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])

        y = net(mx.nd.zeros((1, 3, 32, 32), ctx=ctx))
        assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 26,868 | 37.604885 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/danet.py | """
DANet for image segmentation, implemented in Gluon.
Original paper: 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
"""
__all__ = ['DANet', 'danet_resnetd50b_cityscapes', 'danet_resnetd101b_cityscapes', 'ScaleBlock']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class ScaleBlock(HybridBlock):
    """
    Simple scale block: multiplies its input by a single learnable scalar.

    The scalar `alpha` is zero-initialized, so at the start of training the
    block outputs zero (callers add it to a residual path, making the whole
    branch an identity at initialization).
    """
    def __init__(self,
                 **kwargs):
        super(ScaleBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Single learnable scalar, deferred-init so it can be created
            # before the input shape is known.
            self.alpha = self.params.get(
                "alpha",
                shape=(1,),
                init=mx.init.Zero(),
                allow_deferred_init=True)

    def hybrid_forward(self, F, x, alpha):
        # Broadcast the (1,)-shaped alpha over the whole input tensor.
        return F.broadcast_mul(alpha, x)

    def __repr__(self):
        s = '{name}(alpha={alpha})'
        # Bug fix: the placeholder is `{alpha}`, but the value used to be passed
        # as the keyword `gamma`, so repr() raised KeyError: 'alpha'.
        return s.format(
            name=self.__class__.__name__,
            alpha=self.alpha.shape[0])

    def calc_flops(self, x):
        # One multiplication per input element; no multiply-accumulates.
        assert (x.shape[0] == 1)
        num_flops = x.size
        num_macs = 0
        return num_flops, num_macs
class PosAttBlock(HybridBlock):
    """
    Position attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
    It captures long-range spatial contextual information.

    Query/key projections reduce the channel count by `reduction`; the value
    projection keeps the full channel count. The attended output is scaled by a
    learnable zero-initialized scalar and added residually to the input.

    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 8
        Squeeze reduction value.
    """
    def __init__(self,
                 channels,
                 reduction=8,
                 **kwargs):
        super(PosAttBlock, self).__init__(**kwargs)
        mid_channels = channels // reduction
        with self.name_scope():
            self.query_conv = conv1x1(
                in_channels=channels,
                out_channels=mid_channels,
                use_bias=True)
            self.key_conv = conv1x1(
                in_channels=channels,
                out_channels=mid_channels,
                use_bias=True)
            self.value_conv = conv1x1(
                in_channels=channels,
                out_channels=channels,
                use_bias=True)
            self.scale = ScaleBlock()

    def hybrid_forward(self, F, x):
        # Flatten the spatial dims: (B, C', H, W) -> (B, C', H*W).
        proj_query = self.query_conv(x).reshape((0, 0, -1))
        proj_key = self.key_conv(x).reshape((0, 0, -1))
        proj_value = self.value_conv(x).reshape((0, 0, -1))
        # Pairwise affinity between spatial positions: (B, H*W, H*W).
        energy = F.batch_dot(proj_query, proj_key, transpose_a=True)
        w = F.softmax(energy)
        # Weighted aggregation of values over positions.
        y = F.batch_dot(proj_value, w, transpose_b=True)
        # Restore the (H, W) layout of the input.
        y = F.reshape_like(y, x, lhs_begin=2, lhs_end=None, rhs_begin=2, rhs_end=None)
        # Learnable residual scale (zero-initialized) plus identity.
        y = self.scale(y) + x
        return y
class ChaAttBlock(HybridBlock):
    """
    Channel attention block from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
    It explicitly models interdependencies between channels.

    Unlike the position attention block there are no learned projections: the
    query, key and value are all the (flattened) input itself.
    """
    def __init__(self,
                 **kwargs):
        super(ChaAttBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.scale = ScaleBlock()

    def hybrid_forward(self, F, x):
        # Flatten the spatial dims: (B, C, H, W) -> (B, C, H*W).
        proj_query = x.reshape((0, 0, -1))
        proj_key = x.reshape((0, 0, -1))
        proj_value = x.reshape((0, 0, -1))
        # Channel-to-channel affinity: (B, C, C).
        energy = F.batch_dot(proj_query, proj_key, transpose_b=True)
        # NOTE(review): this computes (row-max - energy), not (energy - row-max),
        # so softmax weights are larger for LESS similar channels -- matches the
        # reference DANet implementation, presumably intentional; confirm.
        energy_new = energy.max(axis=-1, keepdims=True).broadcast_like(energy) - energy
        w = F.softmax(energy_new)
        y = F.batch_dot(w, proj_value)
        # Restore the (H, W) layout of the input.
        y = F.reshape_like(y, x, lhs_begin=2, lhs_end=None, rhs_begin=2, rhs_end=None)
        # Learnable residual scale (zero-initialized) plus identity.
        y = self.scale(y) + x
        return y
class DANetHeadBranch(HybridBlock):
    """
    DANet head branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pose_att : bool, default True
        Whether to use position attention instead of channel one.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pose_att=True,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DANetHeadBranch, self).__init__(**kwargs)
        # Branch works at a quarter of the backbone channel count.
        mid_channels = in_channels // 4
        dropout_rate = 0.1
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            if pose_att:
                self.att = PosAttBlock(mid_channels)
            else:
                self.att = ChaAttBlock()
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Final 1x1 projection to class logits.
            self.conv3 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
            self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.att(x)
        # y: mid-channel feature map kept so DANetHead can fuse both branches.
        y = self.conv2(x)
        x = self.conv3(y)
        x = self.dropout(x)
        # Returns (per-branch logits, pre-projection features).
        return x, y
class DANetHead(HybridBlock):
    """
    DANet head block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DANetHead, self).__init__(**kwargs)
        # Must match the mid-channel width used inside DANetHeadBranch, since
        # self.conv consumes the sum of the two branch feature maps.
        mid_channels = in_channels // 4
        dropout_rate = 0.1
        with self.name_scope():
            # Position-attention branch.
            self.branch_pa = DANetHeadBranch(
                in_channels=in_channels,
                out_channels=out_channels,
                pose_att=True,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Channel-attention branch.
            self.branch_ca = DANetHeadBranch(
                in_channels=in_channels,
                out_channels=out_channels,
                pose_att=False,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
            self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x):
        pa_x, pa_y = self.branch_pa(x)
        ca_x, ca_y = self.branch_ca(x)
        # Fuse the two branches by element-wise sum of their feature maps.
        y = pa_y + ca_y
        x = self.conv(y)
        x = self.dropout(x)
        # Returns (fused logits, position-branch logits, channel-branch logits);
        # the per-branch logits serve as auxiliary outputs.
        return x, pa_x, ca_x
class DANet(HybridBlock):
    """
    DANet model from 'Dual Attention Network for Scene Segmentation,' https://arxiv.org/abs/1809.02983.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels from feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=19,
                 **kwargs):
        super(DANet, self).__init__(**kwargs)
        assert (in_channels > 0)
        # Backbone downsamples by 8x, so the input must be divisible by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.name_scope():
            self.backbone = backbone
            self.head = DANetHead(
                in_channels=backbone_out_channels,
                out_channels=classes,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
    def hybrid_forward(self, F, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Backbone returns a tuple (features, intermediate bends); only the
        # final feature map is used here.
        x, _ = self.backbone(x)
        x, y, z = self.head(x)
        # Upsample logits back to the input resolution.
        x = F.contrib.BilinearResize2D(x, height=in_size[0], width=in_size[1])
        if self.aux:
            y = F.contrib.BilinearResize2D(y, height=in_size[0], width=in_size[1])
            z = F.contrib.BilinearResize2D(z, height=in_size[0], width=in_size[1])
            # (fused, position-branch aux, channel-branch aux)
            return x, y, z
        else:
            return x
def get_danet(backbone,
              classes,
              aux=False,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create DANet model with specific parameters.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = DANet(backbone=backbone, classes=classes, aux=aux, **kwargs)
    if pretrained:
        # An empty or missing name cannot be resolved in the model store.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        # `ignore_extra` tolerates backbone params absent from the checkpoint.
        net.load_parameters(filename=weights_path, ctx=ctx, ignore_extra=True)
    return net
def danet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DANet model on the base of ResNet(D)-50b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-50b feature extractor without its final pooling stage; the
    # single bend at stage 3 provides the auxiliary output path.
    extractor = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_danet(backbone=extractor, classes=classes, aux=aux,
                     model_name="danet_resnetd50b_cityscapes", **kwargs)
def danet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DANet model on the base of ResNet(D)-101b for Cityscapes from 'Dual Attention Network for Scene Segmentation,'
    https://arxiv.org/abs/1809.02983.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-101b feature extractor without its final pooling stage; the
    # single bend at stage 3 provides the auxiliary output path.
    extractor = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_danet(backbone=extractor, classes=classes, aux=aux,
                     model_name="danet_resnetd101b_cityscapes", **kwargs)
def _test():
    """Smoke-test the DANet model factories: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx
    in_size = (480, 480)
    aux = False
    pretrained = False
    for model in [danet_resnetd50b_cityscapes, danet_resnetd101b_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != danet_resnetd50b_cityscapes or weight_count == 47586427)
        assert (model != danet_resnetd101b_cityscapes or weight_count == 66578555)
        batch = 14
        classes = 19
        x = mx.nd.zeros((batch, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        # Segmentation map keeps batch and spatial dims; channels == classes.
        assert (y.shape[0] == x.shape[0]) and (y.shape[1] == classes)
        assert (y.shape[2] == x.shape[2]) and (y.shape[3] == x.shape[3])
if __name__ == "__main__":
    _test()
| 14,852 | 32.680272 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mobilenetv2.py | """
MobileNetV2 for ImageNet-1K, implemented in Gluon.
Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
"""
__all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4', 'mobilenetv2b_w1',
'mobilenetv2b_w3d4', 'mobilenetv2b_wd2', 'mobilenetv2b_wd4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ReLU6, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block
class LinearBottleneck(HybridBlock):
    """
    So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    expansion : bool
        Whether do expansion of channels.
    remove_exp_conv : bool
        Whether to remove expansion convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 expansion,
                 remove_exp_conv,
                 **kwargs):
        super(LinearBottleneck, self).__init__(**kwargs)
        # Identity shortcut only when the unit keeps shape and channel count.
        self.residual = (in_channels == out_channels) and (strides == 1)
        # Standard MobileNetV2 expansion factor is 6.
        mid_channels = in_channels * 6 if expansion else in_channels
        # The expansion 1x1 conv is skipped only for non-expanding units of the
        # "b"-variant (remove_exp_conv=True).
        self.use_exp_conv = (expansion or (not remove_exp_conv))
        with self.name_scope():
            if self.use_exp_conv:
                self.conv1 = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=ReLU6())
            self.conv2 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                activation=ReLU6())
            # "Linear" bottleneck: the projection conv has no activation.
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        if self.residual:
            x = x + identity
        return x
class MobileNetV2(HybridBlock):
    """
    MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    remove_exp_conv : bool
        Whether to remove expansion convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 remove_exp_conv,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(MobileNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 3x3 conv with stride 2.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                activation=ReLU6()))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # First unit of each stage (except stage 1) downsamples.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # Only the very first unit of the network skips expansion.
                        expansion = (i != 0) or (j != 0)
                        stage.add(LinearBottleneck(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            expansion=expansion,
                            remove_exp_conv=remove_exp_conv))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 1x1 expansion block before pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=ReLU6()))
            in_channels = final_block_channels
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            # Classifier is a 1x1 conv (no bias) acting on the pooled 1x1 map.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=False))
            self.output.add(nn.Flatten())
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_mobilenetv2(width_scale,
                    remove_exp_conv=False,
                    model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create MobileNetV2 model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    remove_exp_conv : bool, default False
        Whether to remove expansion convolution.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 2, 3, 4, 3, 3, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    # Group layers into stages: a downsampling layer opens a new stage,
    # otherwise its units are merged into the current one.
    channels = [[]]
    for ch, num, ds in zip(channels_per_layers, layers, downsample):
        if ds != 0:
            channels.append([ch] * num)
        else:
            channels[-1] = channels[-1] + [ch] * num
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        # The final block only grows when the network is widened.
        if width_scale > 1.0:
            final_block_channels = int(final_block_channels * width_scale)
    net = MobileNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        remove_exp_conv=remove_exp_conv,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_path, ctx=ctx)
    return net
def mobilenetv2_w1(**kwargs):
    """
    MobileNetV2-224 at full width (scale 1.0) from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=1.0, model_name="mobilenetv2_w1", **kwargs)
def mobilenetv2_w3d4(**kwargs):
    """
    MobileNetV2-224 at 3/4 width (scale 0.75) from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=0.75, model_name="mobilenetv2_w3d4", **kwargs)
def mobilenetv2_wd2(**kwargs):
    """
    MobileNetV2-224 at half width (scale 0.5) from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=0.5, model_name="mobilenetv2_wd2", **kwargs)
def mobilenetv2_wd4(**kwargs):
    """
    MobileNetV2-224 at quarter width (scale 0.25) from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=0.25, model_name="mobilenetv2_wd4", **kwargs)
def mobilenetv2b_w1(**kwargs):
    """
    MobileNetV2b-224 at full width (scale 1.0, expansion conv removed where possible) from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=1.0, remove_exp_conv=True, model_name="mobilenetv2b_w1", **kwargs)
def mobilenetv2b_w3d4(**kwargs):
    """
    MobileNetV2b-224 at 3/4 width (scale 0.75, expansion conv removed where possible) from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=0.75, remove_exp_conv=True, model_name="mobilenetv2b_w3d4", **kwargs)
def mobilenetv2b_wd2(**kwargs):
    """
    MobileNetV2b-224 at half width (scale 0.5, expansion conv removed where possible) from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=0.5, remove_exp_conv=True, model_name="mobilenetv2b_wd2", **kwargs)
def mobilenetv2b_wd4(**kwargs):
    """
    MobileNetV2b-224 at quarter width (scale 0.25, expansion conv removed where possible) from
    'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv2(width_scale=0.25, remove_exp_conv=True, model_name="mobilenetv2b_wd4", **kwargs)
def _test():
    """Smoke-test the MobileNetV2 model factories: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    model_list = [
        mobilenetv2_w1,
        mobilenetv2_w3d4,
        mobilenetv2_wd2,
        mobilenetv2_wd4,
        mobilenetv2b_w1,
        mobilenetv2b_w3d4,
        mobilenetv2b_wd2,
        mobilenetv2b_wd4,
    ]
    expected_counts = {
        mobilenetv2_w1: 3504960,
        mobilenetv2_w3d4: 2627592,
        mobilenetv2_wd2: 1964736,
        mobilenetv2_wd4: 1516392,
        mobilenetv2b_w1: 3503872,
        mobilenetv2b_w3d4: 2626968,
        mobilenetv2b_wd2: 1964448,
        mobilenetv2b_wd4: 1516312,
    }
    for model in model_list:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 14,563 | 34.696078 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/squeezenet.py | """
SqueezeNet for ImageNet-1K, implemented in Gluon.
Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
"""
__all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class FireConv(HybridBlock):
    """
    SqueezeNet specific convolution block: a plain convolution followed by ReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 padding,
                 **kwargs):
        super(FireConv, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                padding=padding,
                in_channels=in_channels)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # Convolution then ReLU, fused into a single expression.
        return self.activ(self.conv(x))
class FireUnit(HybridBlock):
    """
    SqueezeNet unit, so-called 'Fire' unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    squeeze_channels : int
        Number of output channels for squeeze convolution blocks.
    expand1x1_channels : int
        Number of output channels for expand 1x1 convolution blocks.
    expand3x3_channels : int
        Number of output channels for expand 3x3 convolution blocks.
    residual : bool
        Whether use residual connection.
    """
    def __init__(self,
                 in_channels,
                 squeeze_channels,
                 expand1x1_channels,
                 expand3x3_channels,
                 residual,
                 **kwargs):
        super(FireUnit, self).__init__(**kwargs)
        self.residual = residual
        with self.name_scope():
            # 1x1 "squeeze" layer reduces channels before the parallel expands.
            self.squeeze = FireConv(
                in_channels=in_channels,
                out_channels=squeeze_channels,
                kernel_size=1,
                padding=0)
            self.expand1x1 = FireConv(
                in_channels=squeeze_channels,
                out_channels=expand1x1_channels,
                kernel_size=1,
                padding=0)
            self.expand3x3 = FireConv(
                in_channels=squeeze_channels,
                out_channels=expand3x3_channels,
                kernel_size=3,
                padding=1)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        x = self.squeeze(x)
        # Parallel 1x1 and 3x3 expand paths, concatenated along channels.
        y1 = self.expand1x1(x)
        y2 = self.expand3x3(x)
        out = F.concat(y1, y2, dim=1)
        if self.residual:
            out = out + identity
        return out
class SqueezeInitBlock(HybridBlock):
    """
    SqueezeNet specific initial block: a stride-2 convolution followed by ReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 **kwargs):
        super(SqueezeInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=2,
                in_channels=in_channels)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # Convolution then ReLU, fused into a single expression.
        return self.activ(self.conv(x))
class SqueezeNet(HybridBlock):
    """
    SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
    https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    residuals : bool
        Whether to use residual units.
    init_block_kernel_size : int or tuple/list of 2 int
        The dimensions of the convolution window for the initial unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 residuals,
                 init_block_kernel_size,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SqueezeNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(SqueezeInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                kernel_size=init_block_kernel_size))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    # Every stage starts with a max-pooling downsampling.
                    stage.add(nn.MaxPool2D(
                        pool_size=3,
                        strides=2,
                        ceil_mode=True))
                    for j, out_channels in enumerate(channels_per_stage):
                        # Fire unit splits: half the output to each expand path,
                        # squeeze layer at 1/8 of the output width.
                        expand_channels = out_channels // 2
                        squeeze_channels = out_channels // 8
                        stage.add(FireUnit(
                            in_channels=in_channels,
                            squeeze_channels=squeeze_channels,
                            expand1x1_channels=expand_channels,
                            expand3x3_channels=expand_channels,
                            residual=((residuals is not None) and (residuals[i][j] == 1))))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.Dropout(rate=0.5))
            # Classifier: 1x1 conv to class channels, ReLU, global average pool.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Conv2D(
                channels=classes,
                kernel_size=1,
                in_channels=in_channels))
            self.output.add(nn.Activation("relu"))
            self.output.add(nn.AvgPool2D(
                pool_size=13,
                strides=1))
            self.output.add(nn.Flatten())
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_squeezenet(version,
                   residual=False,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create SqueezeNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('1.0' or '1.1').
    residual : bool, default False
        Whether to use residual connections.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-version architecture configuration.
    if version == "1.0":
        channels = [[128, 128, 256], [256, 384, 384, 512], [512]]
        residuals = [[0, 1, 0], [1, 0, 1, 0], [1]]
        init_block_kernel_size = 7
        init_block_channels = 96
    elif version == "1.1":
        channels = [[128, 128], [256, 256], [384, 384, 512, 512]]
        residuals = [[0, 1], [0, 1], [0, 1, 0, 1]]
        init_block_kernel_size = 3
        init_block_channels = 64
    else:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))
    # Residual masks are only honoured for the "res" variants.
    if not residual:
        residuals = None
    net = SqueezeNet(
        channels=channels,
        residuals=residuals,
        init_block_kernel_size=init_block_kernel_size,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_path, ctx=ctx)
    return net
def squeezenet_v1_0(**kwargs):
    """
    SqueezeNet 'vanilla' (v1.0) model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
    and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(version="1.0", residual=False, model_name="squeezenet_v1_0", **kwargs)
def squeezenet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
    and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(version="1.1", residual=False, model_name="squeezenet_v1_1", **kwargs)
def squeezeresnet_v1_0(**kwargs):
    """
    SqueezeNet v1.0 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(version="1.0", residual=True, model_name="squeezeresnet_v1_0", **kwargs)
def squeezeresnet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 model with residual connections from 'SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_squeezenet(version="1.1", residual=True, model_name="squeezeresnet_v1_1", **kwargs)
def _test():
    """Smoke-test the SqueezeNet model factories: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    model_list = [
        squeezenet_v1_0,
        squeezenet_v1_1,
        # squeezeresnet_v1_0,
        # squeezeresnet_v1_1,
    ]
    for model in model_list:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != squeezenet_v1_0 or weight_count == 1248424)
        assert (model != squeezenet_v1_1 or weight_count == 1235496)
        assert (model != squeezeresnet_v1_0 or weight_count == 1248424)
        assert (model != squeezeresnet_v1_1 or weight_count == 1235496)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 12,810 | 32.018041 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/octresnet_cifar.py | """
Oct-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with Octave
Convolution,' https://arxiv.org/abs/1904.05049.
"""
__all__ = ['CIFAROctResNet', 'octresnet20_ad2_cifar10', 'octresnet20_ad2_cifar100', 'octresnet20_ad2_svhn',
'octresnet56_ad2_cifar10', 'octresnet56_ad2_cifar100', 'octresnet56_ad2_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block, DualPathSequential
from .octresnet import OctResUnit
class CIFAROctResNet(HybridBlock):
    """
    Oct-ResNet model for CIFAR from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional Neural Networks with
    Octave Convolution,' https://arxiv.org/abs/1904.05049.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    oct_alpha : float, default 0.5
        Octave alpha coefficient.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 oct_alpha=0.5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFAROctResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            # Octave units pass (high-freq, low-freq) pairs between each other;
            # DualPathSequential routes both paths, while the first/last
            # ordinals handle the single-tensor boundary blocks.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=1,
                prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = DualPathSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # First unit of each stage (except the first) downsamples.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # Octave mode per unit: "first" splits into two
                        # frequency paths, "last" merges them back, "std" is an
                        # ordinary single-path unit, "norm" keeps both paths.
                        if (i == 0) and (j == 0):
                            oct_mode = "first"
                        elif (i == len(channels) - 1) and (j == 0):
                            oct_mode = "last"
                        elif (i == len(channels) - 1) and (j != 0):
                            oct_mode = "std"
                        else:
                            oct_mode = "norm"
                        stage.add(OctResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            oct_alpha=oct_alpha,
                            oct_mode=oct_mode,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False))
                        in_channels = out_channels
                self.features.add(stage)
            # CIFAR spatial size at this point is 8x8 -> global average pool.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_octresnet_cifar(classes,
                        blocks,
                        bottleneck,
                        oct_alpha=0.5,
                        model_name=None,
                        pretrained=False,
                        ctx=cpu(),
                        root=os.path.join("~", ".mxnet", "models"),
                        **kwargs):
    """
    Create Oct-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes (10 or 100).
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    oct_alpha : float, default 0.5
        Octave alpha coefficient.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Depth must be 9n+2 with bottleneck units, 6n+2 with plain ones.
    divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % divisor == 0)
    units_per_stage = (blocks - 2) // divisor
    # Per-unit output widths of the three stages; bottleneck units are 4x wider.
    width_scale = 4 if bottleneck else 1
    channels = [[width * width_scale] * units_per_stage for width in [16, 32, 64]]
    init_block_channels = 16
    net = CIFAROctResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        oct_alpha=oct_alpha,
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def octresnet20_ad2_cifar10(classes=10, **kwargs):
    """
    Oct-ResNet-20 (alpha=1/2) model for CIFAR-10 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
    Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_octresnet_cifar(classes=classes, blocks=20, bottleneck=False, oct_alpha=0.5,
                              model_name="octresnet20_ad2_cifar10", **kwargs)
    return net
def octresnet20_ad2_cifar100(classes=100, **kwargs):
    """
    Oct-ResNet-20 (alpha=1/2) model for CIFAR-100 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
    Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_octresnet_cifar(classes=classes, blocks=20, bottleneck=False, oct_alpha=0.5,
                              model_name="octresnet20_ad2_cifar100", **kwargs)
    return net
def octresnet20_ad2_svhn(classes=10, **kwargs):
    """
    Oct-ResNet-20 (alpha=1/2) model for SVHN from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
    Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_octresnet_cifar(classes=classes, blocks=20, bottleneck=False, oct_alpha=0.5,
                              model_name="octresnet20_ad2_svhn", **kwargs)
    return net
def octresnet56_ad2_cifar10(classes=10, **kwargs):
    """
    Oct-ResNet-56 (alpha=1/2) model for CIFAR-10 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
    Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_octresnet_cifar(classes=classes, blocks=56, bottleneck=False, oct_alpha=0.5,
                              model_name="octresnet56_ad2_cifar10", **kwargs)
    return net
def octresnet56_ad2_cifar100(classes=100, **kwargs):
    """
    Oct-ResNet-56 (alpha=1/2) model for CIFAR-100 from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
    Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_octresnet_cifar(classes=classes, blocks=56, bottleneck=False, oct_alpha=0.5,
                              model_name="octresnet56_ad2_cifar100", **kwargs)
    return net
def octresnet56_ad2_svhn(classes=10, **kwargs):
    """
    Oct-ResNet-56 (alpha=1/2) model for SVHN from 'Drop an Octave: Reducing Spatial Redundancy in Convolutional
    Neural Networks with Octave Convolution,' https://arxiv.org/abs/1904.05049.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_octresnet_cifar(classes=classes, blocks=56, bottleneck=False, oct_alpha=0.5,
                              model_name="octresnet56_ad2_svhn", **kwargs)
    return net
def _test():
    """Smoke-test the CIFAR/SVHN Oct-ResNet variants: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    # (constructor, number of classes, expected trainable-parameter count)
    configs = [
        (octresnet20_ad2_cifar10, 10, 272762),
        (octresnet20_ad2_cifar100, 100, 278612),
        (octresnet20_ad2_svhn, 10, 272762),
        (octresnet56_ad2_cifar10, 10, 856058),
        (octresnet56_ad2_cifar100, 100, 861908),
        (octresnet56_ad2_svhn, 10, 856058),
    ]
    for model, classes, expected_params in configs:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_params)
        y = net(mx.nd.zeros((1, 3, 32, 32), ctx=ctx))
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 12,716 | 36.293255 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/nin_cifar.py | """
NIN for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Network In Network,' https://arxiv.org/abs/1312.4400.
"""
__all__ = ['CIFARNIN', 'nin_cifar10', 'nin_cifar100', 'nin_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class NINConv(HybridBlock):
    """
    NIN specific convolution block: a biased convolution followed by a ReLU activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides=1,
                 padding=0,
                 **kwargs):
        super(NINConv, self).__init__(**kwargs)
        with self.name_scope():
            # Bias is kept (use_bias=True) since no batch norm follows the convolution.
            self.conv = nn.Conv2D(
                in_channels=in_channels,
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=True)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        """Apply convolution then ReLU."""
        return self.activ(self.conv(x))
class CIFARNIN(HybridBlock):
    """
    NIN model for CIFAR from 'Network In Network,' https://arxiv.org/abs/1312.4400.

    Three stages of NINConv blocks (each stage a wide conv followed by 1x1 "mlpconv" layers),
    separated by pooling + dropout; the head is a 1x1 conv to `classes` channels followed by
    global average pooling.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    first_kernel_sizes : list of int
        Convolution window sizes for the first units in each stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 first_kernel_sizes,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARNIN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Between stages: downsample (max pool after stage 1, average
                        # pool otherwise) and regularize with dropout.
                        if (j == 0) and (i != 0):
                            if i == 1:
                                stage.add(nn.MaxPool2D(
                                    pool_size=3,
                                    strides=2,
                                    padding=1))
                            else:
                                stage.add(nn.AvgPool2D(
                                    pool_size=3,
                                    strides=2,
                                    padding=1))
                            stage.add(nn.Dropout(rate=0.5))
                        # Only the first unit of a stage uses a wide kernel; the rest
                        # are 1x1 "mlpconv" layers. Padding keeps the spatial size.
                        kernel_size = first_kernel_sizes[i] if j == 0 else 1
                        padding = (kernel_size - 1) // 2
                        stage.add(NINConv(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            padding=padding))
                        in_channels = out_channels
                self.features.add(stage)
            # Head: 1x1 conv to class scores, then 8x8 (global, for 32x32 input after two
            # stride-2 poolings) average pooling and flattening.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(NINConv(
                in_channels=in_channels,
                out_channels=classes,
                kernel_size=1))
            self.output.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output.add(nn.Flatten())

    def hybrid_forward(self, F, x):
        """Compute class logits of shape (batch, classes) for input images `x`."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_nin_cifar(classes,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create NIN model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed NIN architecture: three mlpconv stages, wide kernel first in each stage.
    net = CIFARNIN(
        channels=[[192, 160, 96], [192, 192, 192], [192, 192]],
        first_kernel_sizes=[5, 5, 3],
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def nin_cifar10(classes=10, **kwargs):
    """
    NIN model for CIFAR-10 from 'Network In Network,' https://arxiv.org/abs/1312.4400.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_nin_cifar(classes=classes, model_name="nin_cifar10", **kwargs)
    return net
def nin_cifar100(classes=100, **kwargs):
    """
    NIN model for CIFAR-100 from 'Network In Network,' https://arxiv.org/abs/1312.4400.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_nin_cifar(classes=classes, model_name="nin_cifar100", **kwargs)
    return net
def nin_svhn(classes=10, **kwargs):
    """
    NIN model for SVHN from 'Network In Network,' https://arxiv.org/abs/1312.4400.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_nin_cifar(classes=classes, model_name="nin_svhn", **kwargs)
    return net
def _test():
    """Smoke-test the CIFAR/SVHN NIN variants: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    # (constructor, number of classes, expected trainable-parameter count)
    configs = [
        (nin_cifar10, 10, 966986),
        (nin_cifar100, 100, 984356),
        (nin_svhn, 10, 966986),
    ]
    for model, classes, expected_params in configs:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_params)
        y = net(mx.nd.zeros((1, 3, 32, 32), ctx=ctx))
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 8,489 | 31.037736 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/vgg.py | """
VGG for ImageNet-1K, implemented in Gluon.
Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
"""
__all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b',
'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
class VGGDense(HybridBlock):
    """
    VGG specific dense block: fully-connected layer -> ReLU -> dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(VGGDense, self).__init__(**kwargs)
        with self.name_scope():
            self.fc = nn.Dense(
                in_units=in_channels,
                units=out_channels)
            self.activ = nn.Activation("relu")
            # 0.5 dropout, as in the classifier of the original VGG.
            self.dropout = nn.Dropout(rate=0.5)

    def hybrid_forward(self, F, x):
        """Apply dense layer, ReLU, then dropout."""
        return self.dropout(self.activ(self.fc(x)))
class VGGOutputBlock(HybridBlock):
    """
    VGG specific output block: two hidden dense blocks and a final linear classifier.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels,
                 classes,
                 **kwargs):
        super(VGGOutputBlock, self).__init__(**kwargs)
        # Hidden width of the classifier head (4096, per the original architecture).
        mid_channels = 4096
        with self.name_scope():
            self.fc1 = VGGDense(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.fc2 = VGGDense(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.fc3 = nn.Dense(
                in_units=mid_channels,
                units=classes)

    def hybrid_forward(self, F, x):
        """Run the three-layer classifier head."""
        return self.fc3(self.fc2(self.fc1(x)))
class VGG(HybridBlock):
    """
    VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.

    Five stages of 3x3 conv blocks, each stage ending in a 2x2 max pooling, followed by the
    three-layer fully-connected classifier head (VGGOutputBlock).

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 use_bias=True,
                 use_bn=False,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(VGG, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(conv3x3_block(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            use_bias=use_bias,
                            use_bn=use_bn,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                    # Halve the spatial resolution at the end of every stage.
                    stage.add(nn.MaxPool2D(
                        pool_size=2,
                        strides=2,
                        padding=0))
                self.features.add(stage)
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            # Hard-coded 7x7 final feature map: assumes a 224x224 input reduced by
            # the five stride-2 poolings (224 / 2^5 = 7).
            in_channels = in_channels * 7 * 7
            self.output.add(VGGOutputBlock(
                in_channels=in_channels,
                classes=classes))

    def hybrid_forward(self, F, x):
        """Compute class logits of shape (batch, classes) for input images `x`."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_vgg(blocks,
            use_bias=True,
            use_bn=False,
            model_name=None,
            pretrained=False,
            ctx=cpu(),
            root=os.path.join("~", ".mxnet", "models"),
            **kwargs):
    """
    Create VGG model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (11, 13, 16 or 19).
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Units per stage for each supported depth.
    layers_by_blocks = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]
    channels = [[width] * depth for width, depth in zip([64, 128, 256, 512, 512], layers)]
    net = VGG(
        channels=channels,
        use_bias=use_bias,
        use_bn=use_bn,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def vgg11(**kwargs):
    """
    VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=11, model_name="vgg11", **kwargs)
    return net
def vgg13(**kwargs):
    """
    VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=13, model_name="vgg13", **kwargs)
    return net
def vgg16(**kwargs):
    """
    VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=16, model_name="vgg16", **kwargs)
    return net
def vgg19(**kwargs):
    """
    VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=19, model_name="vgg19", **kwargs)
    return net
def bn_vgg11(**kwargs):
    """
    VGG-11 model with batch normalization (bias-free convolutions) from 'Very Deep Convolutional
    Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs)
    return net
def bn_vgg13(**kwargs):
    """
    VGG-13 model with batch normalization (bias-free convolutions) from 'Very Deep Convolutional
    Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs)
    return net
def bn_vgg16(**kwargs):
    """
    VGG-16 model with batch normalization (bias-free convolutions) from 'Very Deep Convolutional
    Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs)
    return net
def bn_vgg19(**kwargs):
    """
    VGG-19 model with batch normalization (bias-free convolutions) from 'Very Deep Convolutional
    Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs)
    return net
def bn_vgg11b(**kwargs):
    """
    VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep
    Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs)
    return net
def bn_vgg13b(**kwargs):
    """
    VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep
    Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs)
    return net
def bn_vgg16b(**kwargs):
    """
    VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep
    Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs)
    return net
def bn_vgg19b(**kwargs):
    """
    VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep
    Convolutional Networks for Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs)
    return net
def _test():
    """Smoke-test all VGG variants: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    # (constructor, expected trainable-parameter count)
    configs = [
        (vgg11, 132863336),
        (vgg13, 133047848),
        (vgg16, 138357544),
        (vgg19, 143667240),
        (bn_vgg11, 132866088),
        (bn_vgg13, 133050792),
        (bn_vgg16, 138361768),
        (bn_vgg19, 143672744),
        (bn_vgg11b, 132868840),
        (bn_vgg13b, 133053736),
        (bn_vgg16b, 138365992),
        (bn_vgg19b, 143678248),
    ]
    for model, expected_params in configs:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_params)
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 15,326 | 31.541401 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnet_cub.py | """
ResNet for CUB-200-2011, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['resnet10_cub', 'resnet12_cub', 'resnet14_cub', 'resnetbc14b_cub', 'resnet16_cub', 'resnet18_cub',
'resnet26_cub', 'resnetbc26b_cub', 'resnet34_cub', 'resnetbc38b_cub', 'resnet50_cub', 'resnet50b_cub',
'resnet101_cub', 'resnet101b_cub', 'resnet152_cub', 'resnet152b_cub', 'resnet200_cub', 'resnet200b_cub']
from .resnet import get_resnet
def resnet10_cub(classes=200, **kwargs):
    """
    ResNet-10 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_resnet(classes=classes, blocks=10, model_name="resnet10_cub", **kwargs)
    return net
def resnet12_cub(classes=200, **kwargs):
    """
    ResNet-12 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_resnet(classes=classes, blocks=12, model_name="resnet12_cub", **kwargs)
    return net
def resnet14_cub(classes=200, **kwargs):
    """
    ResNet-14 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_resnet(classes=classes, blocks=14, model_name="resnet14_cub", **kwargs)
    return net
def resnetbc14b_cub(classes=200, **kwargs):
    """
    ResNet-BC-14b model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_resnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False,
                     model_name="resnetbc14b_cub", **kwargs)
    return net
def resnet16_cub(classes=200, **kwargs):
    """
    ResNet-16 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. It's an experimental model.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_resnet(classes=classes, blocks=16, model_name="resnet16_cub", **kwargs)
    return net
def resnet18_cub(classes=200, **kwargs):
    """
    ResNet-18 model for CUB-200-2011 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    net = get_resnet(classes=classes, blocks=18, model_name="resnet18_cub", **kwargs)
    return net
def resnet26_cub(classes=200, **kwargs):
    """
    ResNet-26 model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. An experimental variant without bottleneck blocks.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=26,
        bottleneck=False,
        classes=classes,
        model_name="resnet26_cub",
        **kwargs)
def resnetbc26b_cub(classes=200, **kwargs):
    """
    ResNet-BC-26b model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. An experimental bottleneck-compressed variant.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=26,
        bottleneck=True,
        conv1_stride=False,
        classes=classes,
        model_name="resnetbc26b_cub",
        **kwargs)
def resnet34_cub(classes=200, **kwargs):
    """
    ResNet-34 model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=34,
        classes=classes,
        model_name="resnet34_cub",
        **kwargs)
def resnetbc38b_cub(classes=200, **kwargs):
    """
    ResNet-BC-38b model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. An experimental bottleneck-compressed variant.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=38,
        bottleneck=True,
        conv1_stride=False,
        classes=classes,
        model_name="resnetbc38b_cub",
        **kwargs)
def resnet50_cub(classes=200, **kwargs):
    """
    ResNet-50 model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=50,
        classes=classes,
        model_name="resnet50_cub",
        **kwargs)
def resnet50b_cub(classes=200, **kwargs):
    """
    ResNet-50b model tuned for the CUB-200-2011 fine-grained dataset, with the stride moved to the second
    convolution of the bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=50,
        conv1_stride=False,
        classes=classes,
        model_name="resnet50b_cub",
        **kwargs)
def resnet101_cub(classes=200, **kwargs):
    """
    ResNet-101 model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=101,
        classes=classes,
        model_name="resnet101_cub",
        **kwargs)
def resnet101b_cub(classes=200, **kwargs):
    """
    ResNet-101b model tuned for the CUB-200-2011 fine-grained dataset, with the stride moved to the second
    convolution of the bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=101,
        conv1_stride=False,
        classes=classes,
        model_name="resnet101b_cub",
        **kwargs)
def resnet152_cub(classes=200, **kwargs):
    """
    ResNet-152 model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=152,
        classes=classes,
        model_name="resnet152_cub",
        **kwargs)
def resnet152b_cub(classes=200, **kwargs):
    """
    ResNet-152b model tuned for the CUB-200-2011 fine-grained dataset, with the stride moved to the second
    convolution of the bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=152,
        conv1_stride=False,
        classes=classes,
        model_name="resnet152b_cub",
        **kwargs)
def resnet200_cub(classes=200, **kwargs):
    """
    ResNet-200 model tuned for the CUB-200-2011 fine-grained dataset, from 'Deep Residual Learning for Image
    Recognition,' https://arxiv.org/abs/1512.03385. An experimental variant.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=200,
        classes=classes,
        model_name="resnet200_cub",
        **kwargs)
def resnet200b_cub(classes=200, **kwargs):
    """
    ResNet-200b model tuned for the CUB-200-2011 fine-grained dataset, with the stride moved to the second
    convolution of the bottleneck block, from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385. An experimental variant.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(
        blocks=200,
        conv1_stride=False,
        classes=classes,
        model_name="resnet200b_cub",
        **kwargs)
def _test():
    """Smoke-test every CUB ResNet variant: trainable-parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter count for each model constructor.
    expected_counts = {
        resnet10_cub: 5008392,
        resnet12_cub: 5082376,
        resnet14_cub: 5377800,
        resnetbc14b_cub: 8425736,
        resnet16_cub: 6558472,
        resnet18_cub: 11279112,
        resnet26_cub: 17549832,
        resnetbc26b_cub: 14355976,
        resnet34_cub: 21387272,
        resnetbc38b_cub: 20286216,
        resnet50_cub: 23917832,
        resnet50b_cub: 23917832,
        resnet101_cub: 42909960,
        resnet101b_cub: 42909960,
        resnet152_cub: 58553608,
        resnet152b_cub: 58553608,
        resnet200_cub: 63034632,
        resnet200b_cub: 63034632,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 200))
# Run the self-test when this module is executed as a script.
if __name__ == "__main__":
    _test()
# ---- end of resnet_cub source (dump stats: 15,639 chars | avg line 35.63 | max line 117 | py) ----
# ==== imgclsmob: gluon/gluoncv2/models/bagnet.py ====
"""
BagNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
https://openreview.net/pdf?id=SkfMWhAqYQ.
"""
__all__ = ['BagNet', 'bagnet9', 'bagnet17', 'bagnet33']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, ConvBlock
class BagNetBottleneck(HybridBlock):
    """
    BagNet bottleneck block for residual path in BagNet unit.

    The body is a 1x1 -> KxK -> 1x1 convolution stack; the middle KxK convolution
    uses no padding, so it shrinks the spatial size whenever K > 1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the second convolution.
    strides : int or tuple/list of 2 int
        Strides of the second convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 bn_use_global_stats,
                 bottleneck_factor=4,
                 **kwargs):
        super(BagNetBottleneck, self).__init__(**kwargs)
        # Channel compression inside the bottleneck.
        mid_channels = out_channels // bottleneck_factor
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            # padding=0 is deliberate: BagNet limits the receptive field by never
            # padding the KxK convolution.
            self.conv2 = ConvBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=0,
                bn_use_global_stats=bn_use_global_stats)
            # Final projection without activation; the enclosing unit applies ReLU
            # after the residual sum.
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        """Apply the 1x1 -> KxK -> 1x1 bottleneck stack."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class BagNetUnit(HybridBlock):
    """
    BagNet unit.

    Residual unit whose body is a BagNetBottleneck. Because the body's KxK
    convolution is unpadded, a resized identity branch is cropped to the body
    output size before the two branches are summed.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size of the second body convolution.
    strides : int or tuple/list of 2 int
        Strides of the second body convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(BagNetUnit, self).__init__(**kwargs)
        # A projection shortcut is needed when channels or spatial size change.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = BagNetBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        """Residual forward pass with spatial cropping of the shortcut."""
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        if self.resize_identity:
            # Crop the shortcut spatially (axes 2 and 3) to match the body output,
            # which is smaller because the body convolution is unpadded.
            identity = F.slice_like(identity, x, axes=(2, 3))
        x = x + identity
        x = self.activ(x)
        return x
class BagNetInitBlock(HybridBlock):
    """
    BagNet specific initial block.

    A 1x1 convolution followed by an unpadded 3x3 convolution block; unlike the
    usual ResNet stem there is no pooling here.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(BagNetInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels)
            # padding=0 keeps the receptive field minimal from the very first layer.
            self.conv2 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                padding=0,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        """Apply the 1x1 then unpadded 3x3 stem convolutions."""
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class BagNet(HybridBlock):
    """
    BagNet model from 'Approximating CNNs with Bag-of-local-Features models works surprisingly well on ImageNet,'
    https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_pool_size : int
        Size of the pooling windows for final pool.
    normal_kernel_sizes : list of int
        Count of the first units with 3x3 convolution window size for each stage.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_pool_size,
                 normal_kernel_sizes,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(BagNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(BagNetInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the last one.
                        strides = 2 if (j == 0) and (i != len(channels) - 1) else 1
                        # Only the first `normal_kernel_sizes[i]` units of a stage use a
                        # 3x3 window; the rest use 1x1 to cap the receptive field.
                        kernel_size = 3 if j < normal_kernel_sizes[i] else 1
                        stage.add(BagNetUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Non-global average pool whose window matches the final feature-map size.
            self.features.add(nn.AvgPool2D(
                pool_size=final_pool_size,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Extract features and classify."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_bagnet(field,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create BagNet model with specific parameters.

    Parameters:
    ----------
    field : int
        Receptive field size; one of 9, 17 or 33.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet-50-like stage depths; only kernel sizes and final pool vary with `field`.
    layers = [3, 4, 6, 3]
    if field == 9:
        normal_kernel_sizes = [1, 1, 0, 0]
        final_pool_size = 27
    elif field == 17:
        normal_kernel_sizes = [1, 1, 1, 0]
        final_pool_size = 26
    elif field == 33:
        normal_kernel_sizes = [1, 1, 1, 1]
        final_pool_size = 24
    else:
        raise ValueError("Unsupported BagNet with field: {}".format(field))
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = BagNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_pool_size=final_pool_size,
        normal_kernel_sizes=normal_kernel_sizes,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def bagnet9(**kwargs):
    """
    BagNet-9 (9-pixel receptive field) model from 'Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_bagnet(
        field=9,
        model_name="bagnet9",
        **kwargs)
def bagnet17(**kwargs):
    """
    BagNet-17 (17-pixel receptive field) model from 'Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_bagnet(
        field=17,
        model_name="bagnet17",
        **kwargs)
def bagnet33(**kwargs):
    """
    BagNet-33 (33-pixel receptive field) model from 'Approximating CNNs with Bag-of-local-Features models works
    surprisingly well on ImageNet,' https://openreview.net/pdf?id=SkfMWhAqYQ.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_bagnet(
        field=33,
        model_name="bagnet33",
        **kwargs)
def _test():
    """Smoke-test every BagNet variant: trainable-parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter count for each model constructor.
    expected_counts = {
        bagnet9: 15688744,
        bagnet17: 16213032,
        bagnet33: 18310184,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the self-test when this module is executed as a script.
if __name__ == "__main__":
    _test()
# ---- end of bagnet source (dump stats: 12,862 chars | avg line 32.07 | max line 116 | py) ----
# ==== imgclsmob: gluon/gluoncv2/models/airnet.py ====
"""
AirNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNet', 'airnet50_1x64d_r2', 'airnet50_1x64d_r16', 'airnet101_1x64d_r2', 'AirBlock', 'AirInitBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
class AirBlock(HybridBlock):
    """
    AirNet attention block.

    Produces a sigmoid attention map: the input is downsampled by 2 (max-pool),
    processed at the lower resolution, then bilinearly upsampled back to the
    block's input resolution. Because the upsampling target is fixed at
    construction time, the spatial input size must be static.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int, default 1
        Number of groups.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio: int, default 2
        Air compression ratio.
    in_size : tuple of 2 int, default (None, None)
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups=1,
                 bn_use_global_stats=False,
                 ratio=2,
                 in_size=(None, None),
                 **kwargs):
        super(AirBlock, self).__init__(**kwargs)
        assert (out_channels % ratio == 0)
        # Channels are compressed by `ratio` inside the attention branch.
        mid_channels = out_channels // ratio
        self.in_size = in_size
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                groups=groups,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            self.sigmoid = nn.Activation("sigmoid")
    def hybrid_forward(self, F, x):
        """Compute the attention map in [0, 1] at the input resolution."""
        x = self.conv1(x)
        # Downsample by 2 so the attention sees a wider context.
        x = self.pool(x)
        x = self.conv2(x)
        # Upsample back to the (static) input resolution stored at construction.
        x = F.contrib.BilinearResize2D(x, height=self.in_size[0], width=self.in_size[1])
        x = self.conv3(x)
        x = self.sigmoid(x)
        return x
class AirBottleneck(HybridBlock):
    """
    AirNet bottleneck block for residual path in AirNet unit.

    A standard 1x1 -> 3x3 -> 1x1 bottleneck whose intermediate activations are
    modulated by an AirBlock attention map. The attention branch is used only in
    stride-1 units with fewer than 512 mid channels (i.e. the cheaper units).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio: int
        Air compression ratio.
    in_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 ratio,
                 in_size,
                 **kwargs):
        super(AirBottleneck, self).__init__(**kwargs)
        # Classic ResNet bottleneck compression factor of 4.
        mid_channels = out_channels // 4
        self.use_air_block = (strides == 1 and mid_channels < 512)
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            if self.use_air_block:
                self.air = AirBlock(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    ratio=ratio,
                    in_size=in_size)
    def hybrid_forward(self, F, x):
        """Bottleneck forward pass with optional attention modulation."""
        if self.use_air_block:
            # Attention is computed from the block input, in parallel with conv1/conv2.
            att = self.air(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_air_block:
            x = x * att
        x = self.conv3(x)
        return x
class AirUnit(HybridBlock):
    """
    AirNet unit with residual connection.

    Wraps an AirBottleneck body with an identity (or 1x1 projection) shortcut
    and a final ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio: int
        Air compression ratio.
    in_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 ratio,
                 in_size,
                 **kwargs):
        super(AirUnit, self).__init__(**kwargs)
        # A projection shortcut is needed when channels or spatial size change.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = AirBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                ratio=ratio,
                in_size=in_size)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        """Residual forward pass: body + shortcut, then ReLU."""
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        return x
class AirInitBlock(HybridBlock):
    """
    AirNet specific initial block.

    A three-conv 3x3 stem (the first with stride 2) followed by a stride-2
    max-pool, for an overall spatial reduction of 4x.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(AirInitBlock, self).__init__(**kwargs)
        # The first two convolutions operate at half the output width.
        mid_channels = out_channels // 2
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
    def hybrid_forward(self, F, x):
        """Apply the stem convolutions and the downsampling pool."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool(x)
        return x
class AirNet(HybridBlock):
    """
    AirNet model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
    https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    ratio: int
        Air compression ratio.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 ratio,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(AirNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(AirInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # The init block reduces the spatial size by 4; track the running size
            # because AirBlock needs a static resolution for its upsampling.
            in_size = tuple([x // 4 for x in in_size])
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(AirUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            ratio=ratio,
                            in_size=in_size))
                        in_channels = out_channels
                        in_size = tuple([x // strides for x in in_size])
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Extract features and classify."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_airnet(blocks,
               base_channels,
               ratio,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Build an AirNet network for a given depth/width configuration.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    base_channels: int
        Base number of channels.
    ratio: int
        Air compression ratio.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Supported depths and their per-stage unit counts.
    blocks_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported AirNet with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]
    bottleneck_expansion = 4
    init_block_channels = base_channels
    # Stage width doubles at every stage and is expanded by the bottleneck factor.
    channels = []
    for i, li in enumerate(layers):
        stage_width = base_channels * (2 ** i) * bottleneck_expansion
        channels.append([stage_width] * li)
    net = AirNet(
        channels=channels,
        init_block_channels=init_block_channels,
        ratio=ratio,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def airnet50_1x64d_r2(**kwargs):
    """
    AirNet50-1x64d with compression ratio r=2, from 'Attention Inspiring Receptive-Fields Network for Learning
    Invariant Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_airnet(
        blocks=50,
        base_channels=64,
        ratio=2,
        model_name="airnet50_1x64d_r2",
        **kwargs)
def airnet50_1x64d_r16(**kwargs):
    """
    AirNet50-1x64d with compression ratio r=16, from 'Attention Inspiring Receptive-Fields Network for Learning
    Invariant Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_airnet(
        blocks=50,
        base_channels=64,
        ratio=16,
        model_name="airnet50_1x64d_r16",
        **kwargs)
def airnet101_1x64d_r2(**kwargs):
    """
    AirNet101-1x64d with compression ratio r=2, from 'Attention Inspiring Receptive-Fields Network for Learning
    Invariant Representations,' https://ieeexplore.ieee.org/document/8510896.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_airnet(
        blocks=101,
        base_channels=64,
        ratio=2,
        model_name="airnet101_1x64d_r2",
        **kwargs)
def _test():
    """Smoke-test every AirNet variant: trainable-parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter count for each model constructor.
    expected_counts = {
        airnet50_1x64d_r2: 27425864,
        airnet50_1x64d_r16: 25714952,
        airnet101_1x64d_r2: 51727432,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the self-test when this module is executed as a script.
if __name__ == "__main__":
    _test()
# ---- end of airnet source (dump stats: 15,893 chars | avg line 32.46 | max line 115 | py) ----
# ==== imgclsmob: gluon/gluoncv2/models/mnasnet.py ====
"""
MnasNet for ImageNet-1K, implemented in Gluon.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock
class DwsExpSEResUnit(HybridBlock):
    """
    Depthwise separable expanded residual unit with SE-block. Here it used as MnasNet unit.
    Layer order: optional 1x1 expansion conv -> depthwise conv (3x3 or 5x5, possibly
    strided) -> optional SE block -> linear 1x1 projection -> optional identity shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution layer.
    use_kernel3 : bool, default True
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int, default 1
        Expansion factor for each unit.
    se_factor : int, default 0
        SE reduction factor for each unit.
    use_skip : bool, default True
        Whether to use skip connection.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    activation : str, default 'relu'
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 use_kernel3=True,
                 exp_factor=1,
                 se_factor=0,
                 use_skip=True,
                 bn_use_global_stats=False,
                 activation="relu",
                 **kwargs):
        super(DwsExpSEResUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        # The identity shortcut is only valid when the unit preserves both the channel
        # count and the spatial resolution, and is not explicitly disabled.
        self.residual = (in_channels == out_channels) and (strides == 1) and use_skip
        # With exp_factor == 1 the 1x1 expansion conv would be channel-preserving,
        # so it is omitted entirely.
        self.use_exp_conv = exp_factor > 1
        self.use_se = se_factor > 0
        mid_channels = exp_factor * in_channels
        dwconv_block_fn = dwconv3x3_block if use_kernel3 else dwconv5x5_block
        with self.name_scope():
            if self.use_exp_conv:
                # Pointwise expansion: widens the representation before the depthwise conv.
                self.exp_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            # Depthwise spatial convolution; the only (potentially) strided layer here.
            self.dw_conv = dwconv_block_fn(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation)
            if self.use_se:
                # SE operates on the expanded width; reduction is exp_factor * se_factor,
                # i.e. the squeeze width is expressed relative to the unexpanded input.
                self.se = SEBlock(
                    channels=mid_channels,
                    reduction=(exp_factor * se_factor),
                    round_mid=False,
                    mid_activation=activation)
            # Linear pointwise projection back to out_channels (no activation).
            self.pw_conv = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        # Save the input for the shortcut before any transformation.
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x)
        x = self.dw_conv(x)
        if self.use_se:
            x = self.se(x)
        x = self.pw_conv(x)
        if self.residual:
            x = x + identity
        return x
class MnasInitBlock(HybridBlock):
    """
    MnasNet specific initial block: a stride-2 3x3 stem convolution followed by one
    depthwise-separable unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the second block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_skip,
                 bn_use_global_stats=False,
                 **kwargs):
        super(MnasInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Stem: stride-2 3x3 convolution, halving the spatial resolution.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            # Depthwise-separable unit with default exp_factor=1 (no expansion conv).
            self.conv2 = DwsExpSEResUnit(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_skip=use_skip,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class MnasFinalBlock(HybridBlock):
    """
    MnasNet specific final block: one expanded depthwise-separable unit (exp_factor=6)
    followed by a 1x1 convolution up to the classifier width.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    use_skip : bool
        Whether to use skip connection in the first unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_skip,
                 bn_use_global_stats=False,
                 **kwargs):
        super(MnasFinalBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Expanded depthwise-separable unit; `use_skip` controls its shortcut.
            self.conv1 = DwsExpSEResUnit(
                in_channels=in_channels,
                out_channels=mid_channels,
                exp_factor=6,
                use_skip=use_skip,
                bn_use_global_stats=bn_use_global_stats)
            # Final 1x1 conv expanding to the classifier input width.
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class MnasNet(HybridBlock):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    init_block_use_skip : bool
        Whether to use skip connection in the initial unit.
    final_block_use_skip : bool
        Whether to use skip connection in the final block of the feature extractor.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 se_factors,
                 init_block_use_skip,
                 final_block_use_skip,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(MnasNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(MnasInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels[1],
                mid_channels=init_block_channels[0],
                use_skip=init_block_use_skip,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels[1]
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # The first unit of every stage downsamples; the rest keep resolution.
                        strides = 2 if (j == 0) else 1
                        # kernels3 entries are 0/1 flags; 1 selects the 3x3 depthwise kernel.
                        use_kernel3 = kernels3[i][j] == 1
                        exp_factor = exp_factors[i][j]
                        se_factor = se_factors[i][j]
                        stage.add(DwsExpSEResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            use_kernel3=use_kernel3,
                            exp_factor=exp_factor,
                            se_factor=se_factor,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(MnasFinalBlock(
                in_channels=in_channels,
                out_channels=final_block_channels[1],
                mid_channels=final_block_channels[0],
                use_skip=final_block_use_skip,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = final_block_channels[1]
            # Fixed 7x7 pooling — matches the final feature map for the default
            # 224x224 input; presumably other in_size values are unsupported (verify).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_mnasnet(version,
                width_scale,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create MnasNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MnasNet ('b1', 'a1' or 'small').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if version == "b1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]]
        kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]]
        exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]]
        se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]]
        init_block_use_skip = False
        final_block_use_skip = False
    elif version == "a1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]]
        init_block_use_skip = False
        final_block_use_skip = True
    elif version == "small":
        init_block_channels = [8, 8]
        final_block_channels = [144, 1280]
        channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]]
        init_block_use_skip = True
        final_block_use_skip = True
    else:
        raise ValueError("Unsupported MnasNet version {}".format(version))

    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        # Bug fix: `init_block_channels` is a list of two ints, so the previous code
        # (`round_channels(init_block_channels * width_scale)`) raised a TypeError for
        # any width_scale != 1.0. Scale each stem width individually instead.
        # NOTE(review): `final_block_channels` is left unscaled here; confirm against a
        # reference implementation if non-unit width scales are ever published.
        init_block_channels = [round_channels(ci * width_scale) for ci in init_block_channels]

    net = MnasNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        se_factors=se_factors,
        init_block_use_skip=init_block_use_skip,
        final_block_use_skip=final_block_use_skip,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def mnasnet_b1(**kwargs):
    """
    Construct the MnasNet-B1 network ('MnasNet: Platform-Aware Neural Architecture Search
    for Mobile,' https://arxiv.org/abs/1807.11626).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mnasnet(
        version="b1",
        width_scale=1.0,
        model_name="mnasnet_b1",
        **kwargs)
def mnasnet_a1(**kwargs):
    """
    Construct the MnasNet-A1 network ('MnasNet: Platform-Aware Neural Architecture Search
    for Mobile,' https://arxiv.org/abs/1807.11626).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mnasnet(
        version="a1",
        width_scale=1.0,
        model_name="mnasnet_a1",
        **kwargs)
def mnasnet_small(**kwargs):
    """
    Construct the MnasNet-Small network ('MnasNet: Platform-Aware Neural Architecture
    Search for Mobile,' https://arxiv.org/abs/1807.11626).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mnasnet(
        version="small",
        width_scale=1.0,
        model_name="mnasnet_small",
        **kwargs)
def _test():
    """Smoke-test the MnasNet variants: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per factory function.
    expected_weight_counts = {
        mnasnet_b1: 4383312,
        mnasnet_a1: 3887038,
        mnasnet_small: 2030264,
    }

    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        params = net.collect_params()
        total = 0
        for param in params.values():
            # Skip shapeless (deferred) and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            total += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, total))
        assert (total == expected)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
| 16,501 | 34.95207 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pyramidnet_cifar.py | """
PyramidNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['CIFARPyramidNet', 'pyramidnet110_a48_cifar10', 'pyramidnet110_a48_cifar100', 'pyramidnet110_a48_svhn',
'pyramidnet110_a84_cifar10', 'pyramidnet110_a84_cifar100', 'pyramidnet110_a84_svhn',
'pyramidnet110_a270_cifar10', 'pyramidnet110_a270_cifar100', 'pyramidnet110_a270_svhn',
'pyramidnet164_a270_bn_cifar10', 'pyramidnet164_a270_bn_cifar100', 'pyramidnet164_a270_bn_svhn',
'pyramidnet200_a240_bn_cifar10', 'pyramidnet200_a240_bn_cifar100', 'pyramidnet200_a240_bn_svhn',
'pyramidnet236_a220_bn_cifar10', 'pyramidnet236_a220_bn_cifar100', 'pyramidnet236_a220_bn_svhn',
'pyramidnet272_a200_bn_cifar10', 'pyramidnet272_a200_bn_cifar100', 'pyramidnet272_a200_bn_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .preresnet import PreResActivation
from .pyramidnet import PyrUnit
class CIFARPyramidNet(HybridBlock):
    """
    PyramidNet model for CIFAR from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARPyramidNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: plain 3x3 conv + BN without activation (pre-activation style).
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(PyrUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck))
                        in_channels = out_channels
                self.features.add(stage)
            # Trailing BN + activation, deferred to here by the pre-activation design.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 8x8 pooling matches the final feature map for the default 32x32 input.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_pyramidnet_cifar(classes,
                         blocks,
                         alpha,
                         bottleneck,
                         model_name=None,
                         pretrained=False,
                         ctx=cpu(),
                         root=os.path.join("~", ".mxnet", "models"),
                         **kwargs):
    """
    Create PyramidNet for CIFAR model with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Three equal-length stages; the per-unit conv count (3 for bottleneck, 2 for
    # simple) determines how total depth maps to units per stage.
    if bottleneck:
        assert ((blocks - 2) % 9 == 0)
        layers = [(blocks - 2) // 9] * 3
    else:
        assert ((blocks - 2) % 6 == 0)
        layers = [(blocks - 2) // 6] * 3
    init_block_channels = 16
    # Width grows linearly by alpha / total_units per unit (the "pyramid" shape).
    growth_add = float(alpha) / float(sum(layers))

    channels = []
    last_channels = init_block_channels
    for stage_units in layers:
        stage_channels = [(i + 1) * growth_add + last_channels for i in range(stage_units)]
        channels.append(stage_channels)
        last_channels = stage_channels[-1]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]

    if bottleneck:
        # Bottleneck units expand their output by 4x.
        channels = [[cij * 4 for cij in ci] for ci in channels]

    net = CIFARPyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def pyramidnet110_a48_cifar10(classes=10, **kwargs):
    """
    Construct a PyramidNet-110 (a=48) classifier for CIFAR-10 ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_cifar10", **kwargs)


def pyramidnet110_a48_cifar100(classes=100, **kwargs):
    """
    Construct a PyramidNet-110 (a=48) classifier for CIFAR-100 ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_cifar100", **kwargs)


def pyramidnet110_a48_svhn(classes=10, **kwargs):
    """
    Construct a PyramidNet-110 (a=48) classifier for SVHN ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=48, bottleneck=False,
                                model_name="pyramidnet110_a48_svhn", **kwargs)
def pyramidnet110_a84_cifar10(classes=10, **kwargs):
    """
    Construct a PyramidNet-110 (a=84) classifier for CIFAR-10 ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_cifar10", **kwargs)


def pyramidnet110_a84_cifar100(classes=100, **kwargs):
    """
    Construct a PyramidNet-110 (a=84) classifier for CIFAR-100 ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_cifar100", **kwargs)


def pyramidnet110_a84_svhn(classes=10, **kwargs):
    """
    Construct a PyramidNet-110 (a=84) classifier for SVHN ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=84, bottleneck=False,
                                model_name="pyramidnet110_a84_svhn", **kwargs)
def pyramidnet110_a270_cifar10(classes=10, **kwargs):
    """
    Construct a PyramidNet-110 (a=270) classifier for CIFAR-10 ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_cifar10", **kwargs)


def pyramidnet110_a270_cifar100(classes=100, **kwargs):
    """
    Construct a PyramidNet-110 (a=270) classifier for CIFAR-100 ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_cifar100", **kwargs)


def pyramidnet110_a270_svhn(classes=10, **kwargs):
    """
    Construct a PyramidNet-110 (a=270) classifier for SVHN ('Deep Pyramidal Residual
    Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=110, alpha=270, bottleneck=False,
                                model_name="pyramidnet110_a270_svhn", **kwargs)
def pyramidnet164_a270_bn_cifar10(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-164 (a=270) classifier for CIFAR-10 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_cifar10", **kwargs)


def pyramidnet164_a270_bn_cifar100(classes=100, **kwargs):
    """
    Construct a bottleneck PyramidNet-164 (a=270) classifier for CIFAR-100 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_cifar100", **kwargs)


def pyramidnet164_a270_bn_svhn(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-164 (a=270) classifier for SVHN ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=164, alpha=270, bottleneck=True,
                                model_name="pyramidnet164_a270_bn_svhn", **kwargs)
def pyramidnet200_a240_bn_cifar10(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-200 (a=240) classifier for CIFAR-10 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_cifar10", **kwargs)


def pyramidnet200_a240_bn_cifar100(classes=100, **kwargs):
    """
    Construct a bottleneck PyramidNet-200 (a=240) classifier for CIFAR-100 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_cifar100", **kwargs)


def pyramidnet200_a240_bn_svhn(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-200 (a=240) classifier for SVHN ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=200, alpha=240, bottleneck=True,
                                model_name="pyramidnet200_a240_bn_svhn", **kwargs)
def pyramidnet236_a220_bn_cifar10(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-236 (a=220) classifier for CIFAR-10 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_cifar10", **kwargs)


def pyramidnet236_a220_bn_cifar100(classes=100, **kwargs):
    """
    Construct a bottleneck PyramidNet-236 (a=220) classifier for CIFAR-100 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_cifar100", **kwargs)


def pyramidnet236_a220_bn_svhn(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-236 (a=220) classifier for SVHN ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=236, alpha=220, bottleneck=True,
                                model_name="pyramidnet236_a220_bn_svhn", **kwargs)
def pyramidnet272_a200_bn_cifar10(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-272 (a=200) classifier for CIFAR-10 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_cifar10", **kwargs)


def pyramidnet272_a200_bn_cifar100(classes=100, **kwargs):
    """
    Construct a bottleneck PyramidNet-272 (a=200) classifier for CIFAR-100 ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_cifar100", **kwargs)


def pyramidnet272_a200_bn_svhn(classes=10, **kwargs):
    """
    Construct a bottleneck PyramidNet-272 (a=200) classifier for SVHN ('Deep Pyramidal
    Residual Networks,' https://arxiv.org/abs/1610.02915).

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_pyramidnet_cifar(classes=classes, blocks=272, alpha=200, bottleneck=True,
                                model_name="pyramidnet272_a200_bn_svhn", **kwargs)
def _test():
    """Smoke-test all PyramidNet CIFAR/SVHN variants: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (factory, number of classes, expected trainable-parameter count)
    configs = [
        (pyramidnet110_a48_cifar10, 10, 1772706),
        (pyramidnet110_a48_cifar100, 100, 1778556),
        (pyramidnet110_a48_svhn, 10, 1772706),
        (pyramidnet110_a84_cifar10, 10, 3904446),
        (pyramidnet110_a84_cifar100, 100, 3913536),
        (pyramidnet110_a84_svhn, 10, 3904446),
        (pyramidnet110_a270_cifar10, 10, 28485477),
        (pyramidnet110_a270_cifar100, 100, 28511307),
        (pyramidnet110_a270_svhn, 10, 28485477),
        (pyramidnet164_a270_bn_cifar10, 10, 27216021),
        (pyramidnet164_a270_bn_cifar100, 100, 27319071),
        (pyramidnet164_a270_bn_svhn, 10, 27216021),
        (pyramidnet200_a240_bn_cifar10, 10, 26752702),
        (pyramidnet200_a240_bn_cifar100, 100, 26844952),
        (pyramidnet200_a240_bn_svhn, 10, 26752702),
        (pyramidnet236_a220_bn_cifar10, 10, 26969046),
        (pyramidnet236_a220_bn_cifar100, 100, 27054096),
        (pyramidnet236_a220_bn_svhn, 10, 26969046),
        (pyramidnet272_a200_bn_cifar10, 10, 26210842),
        (pyramidnet272_a200_bn_cifar100, 100, 26288692),
        (pyramidnet272_a200_bn_svhn, 10, 26210842),
    ]

    for model, classes, expected in configs:
        net = model(pretrained=pretrained, classes=classes)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        params = net.collect_params()
        total = 0
        for param in params.values():
            # Skip shapeless (deferred) and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            total += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, total))
        assert (total == expected)

        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))
| 25,957 | 33.110381 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/preresnet_cifar.py | """
PreResNet for CIFAR/SVHN, implemented in Gluon.
Original papers: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['CIFARPreResNet', 'preresnet20_cifar10', 'preresnet20_cifar100', 'preresnet20_svhn',
'preresnet56_cifar10', 'preresnet56_cifar100', 'preresnet56_svhn',
'preresnet110_cifar10', 'preresnet110_cifar100', 'preresnet110_svhn',
'preresnet164bn_cifar10', 'preresnet164bn_cifar100', 'preresnet164bn_svhn',
'preresnet272bn_cifar10', 'preresnet272bn_cifar100', 'preresnet272bn_svhn',
'preresnet542bn_cifar10', 'preresnet542bn_cifar100', 'preresnet542bn_svhn',
'preresnet1001_cifar10', 'preresnet1001_cifar100', 'preresnet1001_svhn',
'preresnet1202_cifar10', 'preresnet1202_cifar100', 'preresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3
from .preresnet import PreResUnit, PreResActivation
class CIFARPreResNet(HybridBlock):
    """
    PreResNet model for CIFAR from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    channels : list of list of int
        Per-unit output channel counts, grouped by stage.
    init_block_channels : int
        Output channel count of the stem convolution.
    bottleneck : bool
        Whether units use the bottleneck block instead of the simple one.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of target classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: a single 3x3 convolution (no pooling for small CIFAR inputs).
            self.features.add(conv3x3(
                in_channels=in_channels,
                out_channels=init_block_channels))
            curr_channels = init_block_channels
            for stage_idx, stage_channels in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(stage_idx + 1))
                with stage.name_scope():
                    for unit_idx, unit_channels in enumerate(stage_channels):
                        # Downsample at the first unit of every stage except the first.
                        unit_strides = 1 if (stage_idx == 0) or (unit_idx != 0) else 2
                        stage.add(PreResUnit(
                            in_channels=curr_channels,
                            out_channels=unit_channels,
                            strides=unit_strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False))
                        curr_channels = unit_channels
                self.features.add(stage)
            # Final pre-activation (BN + ReLU) before pooling.
            self.features.add(PreResActivation(
                in_channels=curr_channels,
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=curr_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        return self.output(x)
def get_preresnet_cifar(classes,
                        blocks,
                        bottleneck,
                        model_name=None,
                        pretrained=False,
                        ctx=cpu(),
                        root=os.path.join("~", ".mxnet", "models"),
                        **kwargs):
    """
    Build a PreResNet model for CIFAR with the requested configuration.

    Parameters:
    ----------
    classes : int
        Number of target classes.
    blocks : int
        Total number of layers (network depth).
    bottleneck : bool
        Whether units use the bottleneck block instead of the simple one.
    model_name : str or None, default None
        Model name used to fetch the pretrained weight file.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    assert (classes in [10, 100])

    # depth = 2 + 3 stages * units_per_stage * convs_per_unit,
    # where convs_per_unit is 3 for bottleneck units and 2 for simple ones.
    depth_divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % depth_divisor == 0)
    layers = [(blocks - 2) // depth_divisor] * 3

    init_block_channels = 16
    # Bottleneck units expand the per-stage widths by a factor of 4.
    width_factor = 4 if bottleneck else 1
    channels = [[width * width_factor] * depth
                for (width, depth) in zip([16, 32, 64], layers)]

    net = CIFARPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def preresnet20_cifar10(classes=10, **kwargs):
    """
    PreResNet-20 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_cifar10",
        **kwargs)
def preresnet20_cifar100(classes=100, **kwargs):
    """
    PreResNet-20 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_cifar100",
        **kwargs)
def preresnet20_svhn(classes=10, **kwargs):
    """
    PreResNet-20 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="preresnet20_svhn",
        **kwargs)
def preresnet56_cifar10(classes=10, **kwargs):
    """
    PreResNet-56 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_cifar10",
        **kwargs)
def preresnet56_cifar100(classes=100, **kwargs):
    """
    PreResNet-56 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_cifar100",
        **kwargs)
def preresnet56_svhn(classes=10, **kwargs):
    """
    PreResNet-56 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="preresnet56_svhn",
        **kwargs)
def preresnet110_cifar10(classes=10, **kwargs):
    """
    PreResNet-110 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_cifar10",
        **kwargs)
def preresnet110_cifar100(classes=100, **kwargs):
    """
    PreResNet-110 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_cifar100",
        **kwargs)
def preresnet110_svhn(classes=10, **kwargs):
    """
    PreResNet-110 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="preresnet110_svhn",
        **kwargs)
def preresnet164bn_cifar10(classes=10, **kwargs):
    """
    PreResNet-164(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_cifar10",
        **kwargs)
def preresnet164bn_cifar100(classes=100, **kwargs):
    """
    PreResNet-164(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_cifar100",
        **kwargs)
def preresnet164bn_svhn(classes=10, **kwargs):
    """
    PreResNet-164(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="preresnet164bn_svhn",
        **kwargs)
def preresnet272bn_cifar10(classes=10, **kwargs):
    """
    PreResNet-272(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_cifar10",
        **kwargs)
def preresnet272bn_cifar100(classes=100, **kwargs):
    """
    PreResNet-272(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_cifar100",
        **kwargs)
def preresnet272bn_svhn(classes=10, **kwargs):
    """
    PreResNet-272(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=272,
        bottleneck=True,
        model_name="preresnet272bn_svhn",
        **kwargs)
def preresnet542bn_cifar10(classes=10, **kwargs):
    """
    PreResNet-542(BN) model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="preresnet542bn_cifar10",
        **kwargs)
def preresnet542bn_cifar100(classes=100, **kwargs):
    """
    PreResNet-542(BN) model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="preresnet542bn_cifar100",
        **kwargs)
def preresnet542bn_svhn(classes=10, **kwargs):
    """
    PreResNet-542(BN) model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=542,
        bottleneck=True,
        model_name="preresnet542bn_svhn",
        **kwargs)
def preresnet1001_cifar10(classes=10, **kwargs):
    """
    PreResNet-1001 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="preresnet1001_cifar10",
        **kwargs)
def preresnet1001_cifar100(classes=100, **kwargs):
    """
    PreResNet-1001 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="preresnet1001_cifar100",
        **kwargs)
def preresnet1001_svhn(classes=10, **kwargs):
    """
    PreResNet-1001 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="preresnet1001_svhn",
        **kwargs)
def preresnet1202_cifar10(classes=10, **kwargs):
    """
    PreResNet-1202 model for CIFAR-10 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="preresnet1202_cifar10",
        **kwargs)
def preresnet1202_cifar100(classes=100, **kwargs):
    """
    PreResNet-1202 model for CIFAR-100 from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 100
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="preresnet1202_cifar100",
        **kwargs)
def preresnet1202_svhn(classes=10, **kwargs):
    """
    PreResNet-1202 model for SVHN from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    classes : int, default 10
        Number of target classes.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    return get_preresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="preresnet1202_svhn",
        **kwargs)
def _test():
    """Smoke-test every exported constructor: weight count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes, expected trainable weight count)
    models = [
        (preresnet20_cifar10, 10, 272282),
        (preresnet20_cifar100, 100, 278132),
        (preresnet20_svhn, 10, 272282),
        (preresnet56_cifar10, 10, 855578),
        (preresnet56_cifar100, 100, 861428),
        (preresnet56_svhn, 10, 855578),
        (preresnet110_cifar10, 10, 1730522),
        (preresnet110_cifar100, 100, 1736372),
        (preresnet110_svhn, 10, 1730522),
        (preresnet164bn_cifar10, 10, 1703258),
        (preresnet164bn_cifar100, 100, 1726388),
        (preresnet164bn_svhn, 10, 1703258),
        (preresnet272bn_cifar10, 10, 2816090),
        (preresnet272bn_cifar100, 100, 2839220),
        (preresnet272bn_svhn, 10, 2816090),
        (preresnet542bn_cifar10, 10, 5598170),
        (preresnet542bn_cifar100, 100, 5621300),
        (preresnet542bn_svhn, 10, 5598170),
        (preresnet1001_cifar10, 10, 10327706),
        (preresnet1001_cifar100, 100, 10350836),
        (preresnet1001_svhn, 10, 10327706),
        (preresnet1202_cifar10, 10, 19423834),
        (preresnet1202_cifar100, 100, 19429684),
        (preresnet1202_svhn, 10, 19423834),
    ]

    for model, classes, expected_weight_count in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Count only trainable parameters with a defined shape.
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 26,802 | 36.330084 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/alphapose_coco.py | """
AlphaPose for COCO Keypoint, implemented in Gluon.
Original paper: 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.
"""
__all__ = ['AlphaPose', 'alphapose_fastseresnet101b_coco']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import PixelShuffle2D
from .common import conv3x3, DucBlock, HeatmapMaxDetBlock
from .fastseresnet import fastseresnet101b
class AlphaPose(HybridBlock):
    """
    AlphaPose model from 'RMPE: Regional Multi-person Pose Estimation,' https://arxiv.org/abs/1612.00137.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 return_heatmap=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 **kwargs):
        super(AlphaPose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        with self.name_scope():
            self.backbone = backbone

            self.decoder = nn.HybridSequential(prefix="")
            # PixelShuffle2D(factor=2) upscales 2x spatially while dividing channels by 4.
            self.decoder.add(PixelShuffle2D(factor=2))
            in_channels = backbone_out_channels // 4
            for out_channels in channels:
                self.decoder.add(DucBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    scale_factor=2,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            # Final 3x3 conv maps decoder features to one heatmap per keypoint.
            self.decoder.add(conv3x3(
                in_channels=in_channels,
                out_channels=keypoints,
                use_bias=True))

            # The produced heatmap is 1/4 of the input resolution (see _test below).
            self.heatmap_max_det = HeatmapMaxDetBlock(
                channels=keypoints,
                in_size=(in_size[0] // 4, in_size[1] // 4),
                fixed_size=fixed_size)

    def hybrid_forward(self, F, x):
        x = self.backbone(x)
        heatmap = self.decoder(x)
        if self.return_heatmap:
            return heatmap
        else:
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_alphapose(backbone,
                  backbone_out_channels,
                  keypoints,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Build an AlphaPose model with the given backbone and keypoint count.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name used to fetch the pretrained weight file.
    pretrained : bool, default False
        Whether to load pretrained model weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory where the model parameters are stored.
    """
    # Output widths of the two DUC decoder units.
    decoder_channels = [256, 128]

    net = AlphaPose(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=decoder_channels,
        keypoints=keypoints,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def alphapose_fastseresnet101b_coco(pretrained_backbone=False, keypoints=17, **kwargs):
    """
    AlphaPose model on the base of Fast-SE-ResNet-101b for COCO Keypoint from 'RMPE: Regional Multi-person Pose
    Estimation,' https://arxiv.org/abs/1612.00137.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # [:-1] drops the last child of the backbone's feature sequence
    # (presumably the final pooling block — verify against fastseresnet101b).
    backbone = fastseresnet101b(pretrained=pretrained_backbone).features[:-1]
    return get_alphapose(backbone=backbone, backbone_out_channels=2048, keypoints=keypoints,
                         model_name="alphapose_fastseresnet101b_coco", **kwargs)
def _test():
    """Smoke-test the exported constructor: weight count and output shape."""
    import numpy as np
    import mxnet as mx

    in_size = (256, 192)
    keypoints = 17
    return_heatmap = False
    pretrained = False

    # (constructor, expected trainable weight count)
    models = [
        (alphapose_fastseresnet101b_coco, 59569873),
    ]

    for model, expected_weight_count in models:
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Count only trainable parameters with a defined shape.
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        batch = 14
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        assert ((y.shape[0] == batch) and (y.shape[1] == keypoints))
        if return_heatmap:
            # Heatmap mode: spatial size is a quarter of the input.
            assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4))
        else:
            # Keypoint mode: three values per keypoint.
            assert (y.shape[2] == 3)


if __name__ == "__main__":
    _test()
| 7,299 | 32.953488 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pyramidnet.py | """
PyramidNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.
"""
__all__ = ['PyramidNet', 'pyramidnet101_a360', 'PyrUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResActivation
class PyrBlock(HybridBlock):
    """
    Simple PyramidNet block for the residual path of a PyramidNet unit: two pre-activated
    3x3 convolution blocks, the first constructed with ``activate=False``.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(PyrBlock, self).__init__(**kwargs)
        with self.name_scope():
            # The first conv carries the stride and skips the activation.
            self.conv1 = pre_conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                activate=False)
            self.conv2 = pre_conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class PyrBottleneck(HybridBlock):
    """
    PyramidNet bottleneck block for the residual path of a PyramidNet unit:
    1x1 reduce -> 3x3 (strided) -> 1x1 expand, all pre-activated; the first
    1x1 is constructed with ``activate=False``.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(PyrBottleneck, self).__init__(**kwargs)
        # Internal bottleneck width is a quarter of the output width.
        reduced_channels = out_channels // 4
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=reduced_channels,
                bn_use_global_stats=bn_use_global_stats,
                activate=False)
            self.conv2 = pre_conv3x3_block(
                in_channels=reduced_channels,
                out_channels=reduced_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv1x1_block(
                in_channels=reduced_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        return self.conv3(self.conv2(self.conv1(x)))
class PyrUnit(HybridBlock):
    """
    PyramidNet unit with residual connection. Because the output is at least as wide as
    the input, the identity branch is zero-padded along the channel axis before addition;
    when the unit is strided, the identity is downsampled with average pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 **kwargs):
        super(PyrUnit, self).__init__(**kwargs)
        assert (out_channels >= in_channels)
        self.resize_identity = (strides != 1)
        self.identity_pad_width = out_channels - in_channels
        with self.name_scope():
            body_class = PyrBottleneck if bottleneck else PyrBlock
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_pool = nn.AvgPool2D(
                    pool_size=2,
                    strides=strides,
                    ceil_mode=True)

    def hybrid_forward(self, F, x):
        identity = x
        x = self.bn(self.body(x))
        if self.resize_identity:
            identity = self.identity_pool(identity)
        if self.identity_pad_width > 0:
            # Pad the identity branch with zero channels to match the body width.
            zero_pad = F.zeros_like(F.slice_axis(x, axis=1, begin=0, end=self.identity_pad_width))
            identity = F.concat(identity, zero_pad, dim=1)
        return x + identity
class PyrInitBlock(HybridBlock):
    """
    PyramidNet specific initial block: 7x7/2 convolution, batch-norm, ReLU and 3x3/2 max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(PyrInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                in_channels=in_channels,
                channels=out_channels,
                kernel_size=7,
                strides=2,
                padding=3,
                use_bias=False)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)

    def hybrid_forward(self, F, x):
        x = self.activ(self.bn(self.conv(x)))
        return self.pool(x)
class PyramidNet(HybridBlock):
    """
    PyramidNet model from 'Deep Pyramidal Residual Networks,' https://arxiv.org/abs/1610.02915.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(PyramidNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PyrInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            # `in_channels` is reused as a running tracker of the current width.
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(PyrUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck))
                        in_channels = out_channels
                self.features.add(stage)
            # PyramidNet units are pre-activation style, so a trailing BN+ReLU
            # is required before pooling.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 7x7 average pooling assumes a 224x224 input (7x7 final feature map).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_pyramidnet(blocks,
                   alpha,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create PyramidNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    alpha : int
        PyramidNet's alpha value (total additive channel growth over the net).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        The constructed PyramidNet network.
    """
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14:
        layers = [2, 2, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        # Fixed: the message previously said 'ResNet' (copy-paste from the
        # ResNet factory), but this function builds PyramidNet models.
        raise ValueError("Unsupported PyramidNet with number of blocks: {}".format(blocks))
    init_block_channels = 64
    # Per-unit additive channel growth: alpha is spread evenly over all units.
    growth_add = float(alpha) / float(sum(layers))
    from functools import reduce
    # Accumulate per-stage channel widths; the seed [[init_block_channels]] is
    # only a starting value and is dropped from the result ([1:]).
    channels = reduce(
        lambda xi, yi: xi + [[(i + 1) * growth_add + xi[-1][-1] for i in list(range(yi))]],
        layers,
        [[init_block_channels]])[1:]
    channels = [[int(round(cij)) for cij in ci] for ci in channels]
    if blocks < 50:
        bottleneck = False
    else:
        bottleneck = True
        # Bottleneck units expand their output by a factor of 4.
        channels = [[cij * 4 for cij in ci] for ci in channels]
    net = PyramidNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def pyramidnet101_a360(**kwargs):
    """
    PyramidNet-101 (alpha=360) model from 'Deep Pyramidal Residual Networks,'
    https://arxiv.org/abs/1610.02915.

    All keyword arguments (`pretrained`, `ctx`, `root`, ...) are forwarded to
    `get_pyramidnet`.
    """
    return get_pyramidnet(blocks=101, alpha=360, model_name="pyramidnet101_a360", **kwargs)
def _test():
    """Smoke-test: build each model, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model in [pyramidnet101_a360]:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != pyramidnet101_a360 or weight_count == 42455070)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 13,534 | 31.149644 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnet.py | """
SE-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26',
'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b',
'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b', 'SEResUnit', 'get_seresnet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, SEBlock
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class SEResUnit(HybridBlock):
    """
    SE-ResNet unit: a standard ResNet residual unit with a Squeeze-and-Excitation
    block applied to the residual branch before the shortcut addition.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 conv1_stride,
                 **kwargs):
        super(SEResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the residual branch changes
        # either the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=conv1_stride)
            else:
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            # Channel-wise recalibration of the residual branch output.
            self.se = SEBlock(channels=out_channels)
            if self.resize_identity:
                # 1x1 projection (no activation) to match the identity's shape.
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Squeeze-and-excitation before the residual addition.
        x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class SEResNet(HybridBlock):
    """
    SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            # `in_channels` is reused as a running tracker of the current width.
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SEResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pooling assumes a 224x224 input (7x7 final feature map).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_seresnet(blocks,
                 bottleneck=None,
                 conv1_stride=True,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", "models"),
                 **kwargs):
    """
    Create SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
        If None, it is inferred from `blocks` (bottleneck for >= 50).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        The constructed SE-ResNet network.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Per-stage unit counts; 14/26/38 depend on the block type.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    else:
        raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))

    # Sanity check: conv layers per unit (3 or 2) plus stem and classifier.
    assert (sum(layers) * (3 if bottleneck else 2) + 2 == blocks)

    init_block_channels = 64
    stage_widths = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units expand their output by a factor of 4.
        stage_widths = [width * 4 for width in stage_widths]

    channels = [[width] * depth for width, depth in zip(stage_widths, layers)]

    net = SEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def seresnet10(**kwargs):
    """
    SE-ResNet-10 (experimental) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments (`pretrained`, `ctx`,
    `root`, ...) are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=10, model_name="seresnet10", **kwargs)


def seresnet12(**kwargs):
    """
    SE-ResNet-12 (experimental) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=12, model_name="seresnet12", **kwargs)


def seresnet14(**kwargs):
    """
    SE-ResNet-14 (experimental) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=14, model_name="seresnet14", **kwargs)


def seresnet16(**kwargs):
    """
    SE-ResNet-16 (experimental) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=16, model_name="seresnet16", **kwargs)


def seresnet18(**kwargs):
    """
    SE-ResNet-18 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=18, model_name="seresnet18", **kwargs)


def seresnet26(**kwargs):
    """
    SE-ResNet-26 (experimental, simple blocks) from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507. Keyword arguments are
    forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs)


def seresnetbc26b(**kwargs):
    """
    SE-ResNet-BC-26b (experimental, bottleneck compressed) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Keyword arguments are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs)


def seresnet34(**kwargs):
    """
    SE-ResNet-34 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=34, model_name="seresnet34", **kwargs)


def seresnetbc38b(**kwargs):
    """
    SE-ResNet-BC-38b (experimental, bottleneck compressed) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Keyword arguments are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs)


def seresnet50(**kwargs):
    """
    SE-ResNet-50 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=50, model_name="seresnet50", **kwargs)


def seresnet50b(**kwargs):
    """
    SE-ResNet-50 variant with stride at the second bottleneck convolution,
    from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Keyword arguments are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs)


def seresnet101(**kwargs):
    """
    SE-ResNet-101 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=101, model_name="seresnet101", **kwargs)


def seresnet101b(**kwargs):
    """
    SE-ResNet-101 variant with stride at the second bottleneck convolution,
    from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Keyword arguments are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs)


def seresnet152(**kwargs):
    """
    SE-ResNet-152 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=152, model_name="seresnet152", **kwargs)


def seresnet152b(**kwargs):
    """
    SE-ResNet-152 variant with stride at the second bottleneck convolution,
    from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Keyword arguments are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs)


def seresnet200(**kwargs):
    """
    SE-ResNet-200 (experimental) from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=200, model_name="seresnet200", **kwargs)


def seresnet200b(**kwargs):
    """
    SE-ResNet-200 variant (experimental) with stride at the second bottleneck
    convolution, from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507. Keyword arguments are forwarded to
    `get_seresnet`.
    """
    return get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs)
def _test():
    """Smoke-test: build each model, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected number of trainable weights for each factory.
    expected_counts = {
        seresnet10: 5463332,
        seresnet12: 5537896,
        seresnet14: 5835504,
        seresnet16: 7024640,
        seresnet18: 11778592,
        seresnet26: 18093852,
        seresnetbc26b: 17395976,
        seresnet34: 21958868,
        seresnetbc38b: 24026616,
        seresnet50: 28088024,
        seresnet50b: 28088024,
        seresnet101: 49326872,
        seresnet101b: 49326872,
        seresnet152: 66821848,
        seresnet152b: 66821848,
        seresnet200: 71835864,
        seresnet200b: 71835864,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 20,802 | 33.385124 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnet_cub.py | """
SE-ResNet for CUB-200-2011, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['seresnet10_cub', 'seresnet12_cub', 'seresnet14_cub', 'seresnetbc14b_cub', 'seresnet16_cub',
'seresnet18_cub', 'seresnet26_cub', 'seresnetbc26b_cub', 'seresnet34_cub', 'seresnetbc38b_cub',
'seresnet50_cub', 'seresnet50b_cub', 'seresnet101_cub', 'seresnet101b_cub', 'seresnet152_cub',
'seresnet152b_cub', 'seresnet200_cub', 'seresnet200b_cub']
from .seresnet import get_seresnet
def seresnet10_cub(classes=200, **kwargs):
"""
SE-ResNet-10 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=10, model_name="seresnet10_cub", **kwargs)
def seresnet12_cub(classes=200, **kwargs):
"""
SE-ResNet-12 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=12, model_name="seresnet12_cub", **kwargs)
def seresnet14_cub(classes=200, **kwargs):
"""
SE-ResNet-14 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=14, model_name="seresnet14_cub", **kwargs)
def seresnetbc14b_cub(classes=200, **kwargs):
"""
SE-ResNet-BC-14b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=14, bottleneck=True, conv1_stride=False, model_name="seresnetbc14b_cub",
**kwargs)
def seresnet16_cub(classes=200, **kwargs):
"""
SE-ResNet-16 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=16, model_name="seresnet16_cub", **kwargs)
def seresnet18_cub(classes=200, **kwargs):
"""
SE-ResNet-18 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=18, model_name="seresnet18_cub", **kwargs)
def seresnet26_cub(classes=200, **kwargs):
"""
SE-ResNet-26 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=26, bottleneck=False, model_name="seresnet26_cub", **kwargs)
def seresnetbc26b_cub(classes=200, **kwargs):
"""
SE-ResNet-BC-26b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b_cub",
**kwargs)
def seresnet34_cub(classes=200, **kwargs):
"""
SE-ResNet-34 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=34, model_name="seresnet34_cub", **kwargs)
def seresnetbc38b_cub(classes=200, **kwargs):
"""
SE-ResNet-BC-38b model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
It's an experimental model (bottleneck compressed).
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b_cub",
**kwargs)
def seresnet50_cub(classes=200, **kwargs):
"""
SE-ResNet-50 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=50, model_name="seresnet50_cub", **kwargs)
def seresnet50b_cub(classes=200, **kwargs):
"""
SE-ResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation Networks,'
https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=50, conv1_stride=False, model_name="seresnet50b_cub", **kwargs)
def seresnet101_cub(classes=200, **kwargs):
"""
SE-ResNet-101 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=101, model_name="seresnet101_cub", **kwargs)
def seresnet101b_cub(classes=200, **kwargs):
"""
SE-ResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=101, conv1_stride=False, model_name="seresnet101b_cub", **kwargs)
def seresnet152_cub(classes=200, **kwargs):
"""
SE-ResNet-152 model for CUB-200-2011 from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
classes : int, default 200
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_seresnet(classes=classes, blocks=152, model_name="seresnet152_cub", **kwargs)
def seresnet152b_cub(classes=200, **kwargs):
    """
    SE-ResNet-152 model (stride moved to the second convolution of the bottleneck block) for CUB-200-2011, from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(blocks=152, conv1_stride=False, classes=classes, model_name="seresnet152b_cub", **kwargs)
def seresnet200_cub(classes=200, **kwargs):
    """
    SE-ResNet-200 model (experimental) tuned for the CUB-200-2011 fine-grained dataset, from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(blocks=200, classes=classes, model_name="seresnet200_cub", **kwargs)
def seresnet200b_cub(classes=200, **kwargs):
    """
    SE-ResNet-200 model (experimental; stride moved to the second convolution of the bottleneck block) for
    CUB-200-2011, from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet(blocks=200, conv1_stride=False, classes=classes, model_name="seresnet200b_cub", **kwargs)
def _test():
    """Smoke-test every SE-ResNet CUB variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter counts per model constructor.
    expected_weights = {
        seresnet10_cub: 5052932,
        seresnet12_cub: 5127496,
        seresnet14_cub: 5425104,
        seresnetbc14b_cub: 9126136,
        seresnet16_cub: 6614240,
        seresnet18_cub: 11368192,
        seresnet26_cub: 17683452,
        seresnetbc26b_cub: 15756776,
        seresnet34_cub: 21548468,
        seresnetbc38b_cub: 22387416,
        seresnet50_cub: 26448824,
        seresnet50b_cub: 26448824,
        seresnet101_cub: 47687672,
        seresnet101b_cub: 47687672,
        seresnet152_cub: 65182648,
        seresnet152b_cub: 65182648,
        seresnet200_cub: 70196664,
        seresnet200b_cub: 70196664,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        # One forward pass to confirm the 200-way classifier output.
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 200))
| 15,666 | 36.037825 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/densenet.py | """
DenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'DenseUnit', 'TransitionBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .preresnet import PreResInitBlock, PreResActivation
class DenseUnit(HybridBlock):
    """
    DenseNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 **kwargs):
        super(DenseUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        # Bottleneck expansion factor (the "4 * growth rate" of the DenseNet paper).
        bn_size = 4
        # Only the incremental feature maps are computed by the convolutions;
        # the unit's output is the input concatenated with these new maps.
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bn_size
        with self.name_scope():
            # Pre-activated (BN-ReLU-Conv) 1x1 bottleneck followed by a 3x3 conv.
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=inc_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x):
        identity = x
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_dropout:
            x = self.dropout(x)
        # Dense connectivity: concatenate input features with the new ones.
        x = F.concat(identity, x, dim=1)
        return x
class TransitionBlock(HybridBlock):
    """
    DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the
    first unit of each stage. Compresses channels with a 1x1 convolution and halves the spatial
    resolution with average pooling.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(TransitionBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Pre-activated 1x1 conv for channel compression.
            self.conv = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            # 2x2 average pooling with stride 2 halves height and width.
            self.pool = nn.AvgPool2D(
                pool_size=2,
                strides=2,
                padding=0)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.pool(x)
        return x
class DenseNet(HybridBlock):
    """
    DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 7x7 conv + pooling (shared with pre-activation ResNet).
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    # Every stage after the first begins with a transition block
                    # that halves both the channel count and spatial size.
                    if i != 0:
                        stage.add(TransitionBlock(
                            in_channels=in_channels,
                            out_channels=(in_channels // 2),
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = in_channels // 2
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(DenseUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            bn_use_global_stats=bn_use_global_stats,
                            dropout_rate=dropout_rate))
                        in_channels = out_channels
                self.features.add(stage)
            # Final BN + ReLU: pre-activation networks need a trailing activation.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 7x7 global average pooling for a 224x224 input.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def _densenet_channels(init_block_channels, growth_rate, layers):
    """
    Compute the per-unit output channel counts for every DenseNet stage.
    Parameters:
    ----------
    init_block_channels : int
        Number of output channels for the initial unit.
    growth_rate : int
        Number of feature maps added by each dense unit.
    layers : list of int
        Number of dense units in each stage.
    Returns:
    -------
    list of list of int
        Output channels of every unit, grouped by stage.
    """
    channels = []
    # The stem feeds the first stage with twice the initial channels; each later
    # stage starts from half of the previous stage's final width (the transition
    # block compresses channels by a factor of two).
    prev_channels = init_block_channels * 2
    for num_units in layers:
        base_channels = prev_channels // 2
        stage_channels = [base_channels + growth_rate * (k + 1) for k in range(num_units)]
        channels.append(stage_channels)
        prev_channels = stage_channels[-1]
    return channels
def get_densenet(blocks,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", "models"),
                 **kwargs):
    """
    Create DenseNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 121, 161, 169 or 201).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks))
    # Explicit loop (in _densenet_channels) replaces the previous nested
    # functools.reduce expression; the resulting channel lists are identical.
    channels = _densenet_channels(init_block_channels, growth_rate, layers)
    net = DenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def densenet121(**kwargs):
    """
    DenseNet-121 classification model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_densenet(model_name="densenet121", blocks=121, **kwargs)
def densenet161(**kwargs):
    """
    DenseNet-161 classification model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_densenet(model_name="densenet161", blocks=161, **kwargs)
def densenet169(**kwargs):
    """
    DenseNet-169 classification model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_densenet(model_name="densenet169", blocks=169, **kwargs)
def densenet201(**kwargs):
    """
    DenseNet-201 classification model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_densenet(model_name="densenet201", blocks=201, **kwargs)
def _test():
    """Smoke-test all DenseNet variants: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter counts per model constructor.
    expected_weights = {
        densenet121: 7978856,
        densenet161: 28681000,
        densenet169: 14149480,
        densenet201: 20013928,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        # One forward pass to confirm the ImageNet-1K classifier output.
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
| 11,737 | 32.346591 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnext.py | """
SE-ResNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, SEBlock
from .resnet import ResInitBlock
from .resnext import ResNeXtBottleneck
class SEResNeXtUnit(HybridBlock):
    """
    SE-ResNeXt unit: a grouped-convolution ResNeXt bottleneck with channel-wise
    squeeze-and-excitation recalibration and a residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(SEResNeXtUnit, self).__init__(**kwargs)
        # A 1x1 projection is needed whenever the shortcut changes shape.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = ResNeXtBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats)
            # Squeeze-and-excitation recalibrates channels before the residual add.
            self.se = SEBlock(channels=out_channels)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class SEResNeXt(HybridBlock):
    """
    SE-ResNeXt model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SEResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 7x7 conv + pooling (shared with ResNet).
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage but the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SEResNeXtUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            cardinality=cardinality,
                            bottleneck_width=bottleneck_width,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 global average pooling for a 224x224 input.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_seresnext(blocks,
                  cardinality,
                  bottleneck_width,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Build an SE-ResNeXt model for the requested depth.
    Parameters:
    ----------
    blocks : int
        Number of blocks (50 or 101).
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Units per stage for each supported depth.
    layers_per_depth = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_per_depth:
        raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks))
    layers = layers_per_depth[blocks]
    # Every unit within a stage shares the same output width.
    channels = [[width] * depth for width, depth in zip([256, 512, 1024, 2048], layers)]
    net = SEResNeXt(
        channels=channels,
        init_block_channels=64,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if not pretrained:
        return net
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    net.load_parameters(
        filename=get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root),
        ctx=ctx)
    return net
def seresnext50_32x4d(**kwargs):
    """
    SE-ResNeXt-50 (32x4d) classification model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnext(model_name="seresnext50_32x4d", blocks=50, cardinality=32, bottleneck_width=4, **kwargs)
def seresnext101_32x4d(**kwargs):
    """
    SE-ResNeXt-101 (32x4d) classification model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnext(model_name="seresnext101_32x4d", blocks=101, cardinality=32, bottleneck_width=4, **kwargs)
def seresnext101_64x4d(**kwargs):
    """
    SE-ResNeXt-101 (64x4d) classification model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnext(model_name="seresnext101_64x4d", blocks=101, cardinality=64, bottleneck_width=4, **kwargs)
def _test():
    """Smoke-test all SE-ResNeXt variants: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter counts per model constructor.
    expected_weights = {
        seresnext50_32x4d: 27559896,
        seresnext101_32x4d: 48955416,
        seresnext101_64x4d: 88232984,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        # One forward pass to confirm the ImageNet-1K classifier output.
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
| 9,912 | 32.265101 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/darts.py | """
DARTS for ImageNet-1K, implemented in Gluon.
Original paper: 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
"""
__all__ = ['DARTS', 'darts']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1
from .nasnet import nasnet_dual_path_sequential
class DwsConv(HybridBlock):
    """
    Standard dilated depthwise separable convolution block: a depthwise (grouped)
    convolution followed by a pointwise 1x1 convolution. No normalization or
    activation layers are included.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layers use a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation,
                 use_bias=False,
                 **kwargs):
        super(DwsConv, self).__init__(**kwargs)
        with self.name_scope():
            # Depthwise: one group per input channel.
            self.dw_conv = nn.Conv2D(
                channels=in_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=in_channels,
                use_bias=use_bias,
                in_channels=in_channels)
            # Pointwise: 1x1 conv mixes channels.
            self.pw_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias)
    def hybrid_forward(self, F, x):
        x = self.dw_conv(x)
        x = self.pw_conv(x)
        return x
class DartsConv(HybridBlock):
    """
    DARTS specific convolution block: optional ReLU pre-activation, convolution,
    then batch normalization (ReLU-Conv-BN order).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    activate : bool, default True
        Whether activate the convolution block (apply ReLU before the conv).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 activate=True,
                 **kwargs):
        super(DartsConv, self).__init__(**kwargs)
        self.activate = activate
        with self.name_scope():
            if self.activate:
                self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nn.BatchNorm(in_channels=out_channels)
    def hybrid_forward(self, F, x):
        if self.activate:
            x = self.activ(x)
        x = self.conv(x)
        x = self.bn(x)
        return x
def darts_conv1x1(in_channels,
                  out_channels,
                  activate=True):
    """
    Construct a 1x1 DARTS convolution block (stride 1, no padding).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool, default True
        Whether activate the convolution block.
    """
    return DartsConv(
        in_channels=in_channels,
        out_channels=out_channels,
        activate=activate,
        kernel_size=1,
        strides=1,
        padding=0)
def darts_conv3x3_s2(in_channels,
                     out_channels,
                     activate=True):
    """
    Construct a 3x3 DARTS convolution block with stride 2 (halves the spatial size).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    activate : bool, default True
        Whether activate the convolution block.
    """
    return DartsConv(
        in_channels=in_channels,
        out_channels=out_channels,
        activate=activate,
        kernel_size=3,
        strides=2,
        padding=1)
class DartsDwsConv(HybridBlock):
    """
    DARTS specific dilated depthwise separable convolution block:
    ReLU, then DwsConv, then batch normalization.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation,
                 **kwargs):
        super(DartsDwsConv, self).__init__(**kwargs)
        with self.name_scope():
            self.activ = nn.Activation("relu")
            self.conv = DwsConv(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                use_bias=False)
            self.bn = nn.BatchNorm(in_channels=out_channels)
    def hybrid_forward(self, F, x):
        x = self.activ(x)
        x = self.conv(x)
        x = self.bn(x)
        return x
class DartsDwsBranch(HybridBlock):
    """
    DARTS specific block with two stacked depthwise separable convolution layers
    (the "sep_conv" branch); only the first may apply a stride.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 **kwargs):
        super(DartsDwsBranch, self).__init__(**kwargs)
        # Intermediate width equals the input width; only the second conv
        # changes the channel count.
        mid_channels = in_channels
        with self.name_scope():
            self.conv1 = DartsDwsConv(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=1)
            self.conv2 = DartsDwsConv(
                in_channels=mid_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=1,
                padding=padding,
                dilation=1)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class DartsReduceBranch(HybridBlock):
    """
    DARTS specific factorized reduce block: two parallel strided 1x1 convolutions
    (the second on a one-pixel-shifted input) whose outputs are concatenated, so
    no spatial information is dropped despite the stride.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (must be even).
    strides : int or tuple/list of 2 int, default 2
        Strides of the convolution.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=2,
                 **kwargs):
        super(DartsReduceBranch, self).__init__(**kwargs)
        assert (out_channels % 2 == 0)
        # Each parallel conv produces half of the output channels.
        mid_channels = out_channels // 2
        with self.name_scope():
            self.activ = nn.Activation("relu")
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=strides)
            self.conv2 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=strides)
            self.bn = nn.BatchNorm(in_channels=out_channels)
    def hybrid_forward(self, F, x):
        x = self.activ(x)
        x1 = self.conv1(x)
        # Shift the input one pixel down/right so conv2 samples the positions
        # that the strided conv1 skipped.
        x = F.slice(x, begin=(None, None, 1, 1), end=(None, None, None, None))
        x2 = self.conv2(x)
        x = F.concat(x1, x2, dim=1)
        x = self.bn(x)
        return x
class Stem1Unit(HybridBlock):
    """
    DARTS Stem1 unit: two stride-2 3x3 convolutions, reducing spatial size 4x.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(Stem1Unit, self).__init__(**kwargs)
        mid_channels = out_channels // 2
        with self.name_scope():
            # First conv has no pre-activation (it receives the raw image).
            self.conv1 = darts_conv3x3_s2(
                in_channels=in_channels,
                out_channels=mid_channels,
                activate=False)
            self.conv2 = darts_conv3x3_s2(
                in_channels=mid_channels,
                out_channels=out_channels,
                activate=True)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
def stem2_unit(in_channels,
               out_channels):
    """
    Build the DARTS Stem2 unit: a single activated 3x3 stride-2 convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    return darts_conv3x3_s2(
        in_channels=in_channels,
        out_channels=out_channels,
        activate=True)
def darts_maxpool3x3(channels,
                     strides):
    """
    Build the DARTS 3x3 max-pooling operation.
    Parameters:
    ----------
    channels : int
        Number of input/output channels. Unused parameter (kept for a uniform
        factory signature; must still be positive).
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    """
    assert (channels > 0)
    return nn.MaxPool2D(pool_size=3, strides=strides, padding=1)
def darts_skip_connection(channels,
                          strides):
    """
    Build the DARTS skip-connection operation: identity at stride 1, a
    factorized-reduce branch at stride 2.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the connection.
    """
    assert (channels > 0)
    if strides != 1:
        assert (strides == 2)
        return DartsReduceBranch(
            in_channels=channels,
            out_channels=channels,
            strides=strides)
    return Identity()
def darts_dws_conv3x3(channels,
                      strides):
    """
    Build the DARTS 3x3 dilated depthwise separable convolution operation
    (dilation 2, padding 2, channel-preserving).
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    """
    return DartsDwsConv(
        in_channels=channels,
        out_channels=channels,
        strides=strides,
        kernel_size=3,
        padding=2,
        dilation=2)
def darts_dws_branch3x3(channels,
                        strides):
    """
    Build the DARTS 3x3 separable convolution branch (two stacked depthwise
    separable convolutions, channel-preserving).
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    """
    return DartsDwsBranch(
        in_channels=channels,
        out_channels=channels,
        strides=strides,
        kernel_size=3,
        padding=1)
# Set of operations in genotype: maps each genotype operation name to a factory
# callable accepting (channels, strides) and returning the corresponding block.
GENOTYPE_OPS = {
    'max_pool_3x3': darts_maxpool3x3,
    'skip_connect': darts_skip_connection,
    'dil_conv_3x3': darts_dws_conv3x3,
    'sep_conv_3x3': darts_dws_branch3x3,
}
class DartsMainBlock(HybridBlock):
    """
    DARTS main block, described by genotype: each of the cell's intermediate
    nodes sums two operations applied to earlier states, and the last four
    states are concatenated as the output.
    Parameters:
    ----------
    genotype : list of tuples (str, int)
        List of genotype elements (operations and linked indices).
    channels : int
        Number of input/output channels.
    reduction : bool
        Whether use reduction.
    """
    def __init__(self,
                 genotype,
                 channels,
                 reduction,
                 **kwargs):
        super(DartsMainBlock, self).__init__(**kwargs)
        # Indices of the states concatenated as the cell output.
        self.concat = [2, 3, 4, 5]
        op_names, indices = zip(*genotype)
        self.indices = indices
        # Two operations feed each intermediate node.
        self.steps = len(op_names) // 2
        with self.name_scope():
            for i, (name, index) in enumerate(zip(op_names, indices)):
                # In a reduction cell, only ops reading the two cell inputs
                # (state index < 2) are strided.
                stride = 2 if reduction and index < 2 else 1
                setattr(self, "ops{}".format(i + 1), GENOTYPE_OPS[name](channels, stride))
    def hybrid_forward(self, F, x, x_prev):
        s0 = x_prev
        s1 = x
        states = [s0, s1]
        for i in range(self.steps):
            j1 = 2 * i
            j2 = 2 * i + 1
            op1 = getattr(self, "ops{}".format(j1 + 1))
            op2 = getattr(self, "ops{}".format(j2 + 1))
            y1 = states[self.indices[j1]]
            y2 = states[self.indices[j2]]
            y1 = op1(y1)
            y2 = op2(y2)
            s = y1 + y2
            states += [s]
        # Concatenate the four intermediate node outputs along channels.
        x_out = F.concat(*[states[i] for i in self.concat], dim=1)
        return x_out
class DartsUnit(HybridBlock):
    """
    DARTS unit: preprocesses the two cell inputs to a common width and applies
    the genotype-defined main block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    genotype : list of tuples (str, int)
        List of genotype elements (operations and linked indices).
    reduction : bool
        Whether use reduction.
    prev_reduction : bool
        Whether use previous reduction.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 genotype,
                 reduction,
                 prev_reduction,
                 **kwargs):
        super(DartsUnit, self).__init__(**kwargs)
        # The main block concatenates 4 node outputs, so each runs at 1/4 width.
        mid_channels = out_channels // 4
        with self.name_scope():
            # When the previous cell reduced, align the skip input's spatial
            # size with a factorized reduce; otherwise a plain 1x1 suffices.
            if prev_reduction:
                self.preprocess_prev = DartsReduceBranch(
                    in_channels=prev_in_channels,
                    out_channels=mid_channels)
            else:
                self.preprocess_prev = darts_conv1x1(
                    in_channels=prev_in_channels,
                    out_channels=mid_channels)
            self.preprocess = darts_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.body = DartsMainBlock(
                genotype=genotype,
                channels=mid_channels,
                reduction=reduction)
    def hybrid_forward(self, F, x, x_prev):
        x = self.preprocess(x)
        x_prev = self.preprocess_prev(x_prev)
        x_out = self.body(x, x_prev)
        return x_out
class DARTS(HybridBlock):
    """
    DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    stem_blocks_channels : int
        Number of output channels for the Stem units.
    normal_genotype : list of tuples (str, int)
        Genotype used by normal (stride-1) cells.
    reduce_genotype : list of tuples (str, int)
        Genotype used by reduction cells.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 stem_blocks_channels,
                 normal_genotype,
                 reduce_genotype,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DARTS, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            # Dual-path container: every unit receives the outputs of the two
            # preceding units (see DartsUnit.hybrid_forward).
            self.features = nasnet_dual_path_sequential(
                return_two=False,
                first_ordinals=2,
                last_ordinals=1)
            self.features.add(Stem1Unit(
                in_channels=in_channels,
                out_channels=stem_blocks_channels))
            in_channels = stem_blocks_channels
            self.features.add(stem2_unit(
                in_channels=in_channels,
                out_channels=stem_blocks_channels))
            # Track both the direct and the skip ("previous") input widths.
            prev_in_channels = in_channels
            in_channels = stem_blocks_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nasnet_dual_path_sequential(prefix="stage{}_".format(i + 1))
                for j, out_channels in enumerate(channels_per_stage):
                    # First unit of every non-first stage is a reduction cell.
                    reduction = (i != 0) and (j == 0)
                    # The skip input comes from a reduced cell either right after
                    # the stem (stage 0, unit 0) or right after a reduction cell.
                    prev_reduction = ((i == 0) and (j == 0)) or ((i != 0) and (j == 1))
                    genotype = reduce_genotype if reduction else normal_genotype
                    stage.add(DartsUnit(
                        in_channels=in_channels,
                        prev_in_channels=prev_in_channels,
                        out_channels=out_channels,
                        genotype=genotype,
                        reduction=reduction,
                        prev_reduction=prev_reduction))
                    prev_in_channels = in_channels
                    in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_darts(model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Build the DARTS network with its published configuration.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    stem_blocks_channels = 48
    layers = [4, 5, 5]
    channels_per_layers = [192, 384, 768]
    # Repeat each stage width once per unit in that stage.
    channels = []
    for width, depth in zip(channels_per_layers, layers):
        channels.append([width] * depth)
    # Published DARTS genotypes: (operation, input-state index) pairs.
    normal_genotype = [
        ('sep_conv_3x3', 0),
        ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 0),
        ('sep_conv_3x3', 1),
        ('sep_conv_3x3', 1),
        ('skip_connect', 0),
        ('skip_connect', 0),
        ('dil_conv_3x3', 2)]
    reduce_genotype = [
        ('max_pool_3x3', 0),
        ('max_pool_3x3', 1),
        ('skip_connect', 2),
        ('max_pool_3x3', 1),
        ('max_pool_3x3', 0),
        ('skip_connect', 2),
        ('skip_connect', 2),
        ('max_pool_3x3', 1)]
    net = DARTS(
        channels=channels,
        stem_blocks_channels=stem_blocks_channels,
        normal_genotype=normal_genotype,
        reduce_genotype=reduce_genotype,
        **kwargs)
    if pretrained:
        # A falsy model name cannot be resolved to a checkpoint file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_path, ctx=ctx)
    return net
def darts(**kwargs):
    """
    DARTS model from 'DARTS: Differentiable Architecture Search,' https://arxiv.org/abs/1806.09055.

    All keyword arguments are forwarded to :func:`get_darts`.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_darts(model_name="darts", **kwargs)
    return net
def _test():
    # Smoke test: build each model, count trainable weights, run one forward pass.
    import numpy as np
    import mxnet as mx
    pretrained = False
    for model in [darts]:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Sum the sizes of all differentiable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != darts or weight_count == 4718752)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 21,552 | 27.969086 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/drn.py | """
DRN for ImageNet-1K, implemented in Gluon.
Original paper: 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
"""
__all__ = ['DRN', 'drnc26', 'drnc42', 'drnc58', 'drnd22', 'drnd38', 'drnd54', 'drnd105']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class DRNConv(HybridBlock):
    """
    DRN specific convolution block: Conv2D -> BatchNorm -> optional ReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation,
                 bn_use_global_stats,
                 activate,
                 **kwargs):
        super(DRNConv, self).__init__(**kwargs)
        self.activate = activate
        with self.name_scope():
            # Bias is omitted because BatchNorm immediately follows.
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def drn_conv1x1(in_channels,
                out_channels,
                strides,
                bn_use_global_stats,
                activate,
                **kwargs):
    """
    Pointwise (1x1, zero padding, no dilation) variant of the DRN convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool
        Whether activate the convolution block.
    """
    return DRNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        dilation=1,
        bn_use_global_stats=bn_use_global_stats,
        activate=activate,
        **kwargs)
def drn_conv3x3(in_channels,
                out_channels,
                strides,
                dilation,
                bn_use_global_stats,
                activate,
                **kwargs):
    """
    3x3 variant of the DRN convolution block; padding equals dilation so the
    spatial size is preserved at stride 1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool
        Whether activate the convolution block.
    """
    return DRNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=dilation,
        dilation=dilation,
        bn_use_global_stats=bn_use_global_stats,
        activate=activate,
        **kwargs)
class DRNBlock(HybridBlock):
    """
    Plain two-convolution residual body used inside a DRN unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 bn_use_global_stats,
                 **kwargs):
        super(DRNBlock, self).__init__(**kwargs)
        with self.name_scope():
            # First 3x3 conv carries the stride and activates.
            self.conv1 = drn_conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats,
                activate=True)
            # Second 3x3 conv keeps the resolution; the enclosing unit applies
            # the activation after the residual addition.
            self.conv2 = drn_conv3x3(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=1,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats,
                activate=False)
    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class DRNBottleneck(HybridBlock):
    """
    DRN bottleneck block for residual path in DRN unit (1x1 reduce -> 3x3 -> 1x1 expand).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for 3x3 convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 bn_use_global_stats,
                 **kwargs):
        super(DRNBottleneck, self).__init__(**kwargs)
        # Standard ResNet-style bottleneck: inner width is a quarter of the output.
        mid_channels = out_channels // 4
        with self.name_scope():
            self.conv1 = drn_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=1,
                bn_use_global_stats=bn_use_global_stats,
                activate=True)
            # The 3x3 conv carries both the stride and the dilation.
            self.conv2 = drn_conv3x3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats,
                activate=True)
            # No activation here; the unit activates after the residual add.
            self.conv3 = drn_conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=1,
                bn_use_global_stats=bn_use_global_stats,
                activate=False)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class DRNUnit(HybridBlock):
    """
    DRN unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Padding/dilation value for 3x3 convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    simplified : bool
        Whether to use a simple or simplified block in units.
    residual : bool
        Whether do residual calculations.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 bn_use_global_stats,
                 bottleneck,
                 simplified,
                 residual,
                 **kwargs):
        super(DRNUnit, self).__init__(**kwargs)
        # Valid configurations: bottleneck implies residual; simplified is
        # mutually exclusive with both bottleneck and residual.
        assert residual or (not bottleneck)
        assert (not (bottleneck and simplified))
        assert (not (residual and simplified))
        self.residual = residual
        # A 1x1 projection is needed when the identity branch cannot match the
        # body's shape (channel or stride change) and a residual add is used.
        self.resize_identity = ((in_channels != out_channels) or (strides != 1)) and self.residual and (not simplified)
        with self.name_scope():
            if bottleneck:
                self.body = DRNBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats)
            elif simplified:
                # Simplified ("D") scheme: a single 3x3 conv, no residual add.
                self.body = drn_conv3x3(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    activate=False)
            else:
                self.body = DRNBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = drn_conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activate=False)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        if self.residual:
            x = x + identity
        # ReLU is applied after the (optional) residual addition.
        x = self.activ(x)
        return x
def drn_init_block(in_channels,
                   out_channels,
                   bn_use_global_stats,
                   **kwargs):
    """
    DRN initial block: a stride-1 7x7 convolution with ReLU, preserving the
    input resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    return DRNConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=1,
        padding=3,
        dilation=1,
        bn_use_global_stats=bn_use_global_stats,
        activate=True,
        **kwargs)
class DRN(HybridBlock):
    """
    DRN-C&D model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of list of int
        Dilation values for 3x3 convolution layers for each unit.
    bottlenecks : list of list of int
        Whether to use a bottleneck or simple block in each unit.
    simplifieds : list of list of int
        Whether to use a simple or simplified block in each unit.
    residuals : list of list of int
        Whether to use residual block in each unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dilations,
                 bottlenecks,
                 simplifieds,
                 residuals,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DRN, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(drn_init_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Only the first unit of each non-first stage downsamples.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(DRNUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            dilation=dilations[i][j],
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=(bottlenecks[i][j] == 1),
                            simplified=(simplifieds[i][j] == 1),
                            residual=(residuals[i][j] == 1)))
                        in_channels = out_channels
                self.features.add(stage)
            # Dilation keeps the feature map at 28x28 for 224x224 inputs, hence
            # the large pooling window.
            self.features.add(nn.AvgPool2D(
                pool_size=28,
                strides=1))
            # Classifier is a 1x1 convolution (fully-convolutional head).
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Conv2D(
                channels=classes,
                kernel_size=1,
                in_channels=in_channels))
            self.output.add(nn.Flatten())
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_drn(blocks,
            simplified=False,
            model_name=None,
            pretrained=False,
            ctx=cpu(),
            root=os.path.join("~", ".mxnet", "models"),
            **kwargs):
    """
    Create DRN-C or DRN-D model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    simplified : bool, default False
        Whether to use simplified scheme (D architecture).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `blocks` is not one of the supported depths.
    """
    # Per-layer unit counts; D-only depths assert `simplified`.
    if blocks == 22:
        assert simplified
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 26:
        layers = [1, 1, 2, 2, 2, 2, 1, 1]
    elif blocks == 38:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 42:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 54:
        assert simplified
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 58:
        layers = [1, 1, 3, 4, 6, 3, 1, 1]
    elif blocks == 105:
        assert simplified
        layers = [1, 1, 3, 4, 23, 3, 1, 1]
    else:
        raise ValueError("Unsupported DRN with number of blocks: {}".format(blocks))
    # Deeper nets (>= 50 blocks) switch to bottleneck units in the middle layers.
    if blocks < 50:
        channels_per_layers = [16, 32, 64, 128, 256, 512, 512, 512]
        bottlenecks_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
    else:
        channels_per_layers = [16, 32, 256, 512, 1024, 2048, 512, 512]
        bottlenecks_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    if simplified:
        simplifieds_per_layers = [1, 1, 0, 0, 0, 0, 1, 1]
        residuals_per_layers = [0, 0, 1, 1, 1, 1, 0, 0]
    else:
        simplifieds_per_layers = [0, 0, 0, 0, 0, 0, 0, 0]
        residuals_per_layers = [1, 1, 1, 1, 1, 1, 0, 0]
    dilations_per_layers = [1, 1, 1, 1, 2, 4, 2, 1]
    downsample = [0, 1, 1, 1, 0, 0, 0, 0]

    def expand(property_per_layers):
        # Group per-layer properties into stages: a layer marked with
        # downsample == 1 opens a new stage, any other layer is merged into
        # the current (last) stage. The leading [] absorbs the first layer,
        # whose downsample flag is 0.
        stages = [[]]
        for prop, count, ds in zip(property_per_layers, layers, downsample):
            if ds != 0:
                stages.append([prop] * count)
            else:
                stages[-1] += [prop] * count
        return stages

    channels = expand(channels_per_layers)
    dilations = expand(dilations_per_layers)
    bottlenecks = expand(bottlenecks_per_layers)
    residuals = expand(residuals_per_layers)
    simplifieds = expand(simplifieds_per_layers)
    init_block_channels = channels_per_layers[0]
    net = DRN(
        channels=channels,
        init_block_channels=init_block_channels,
        dilations=dilations,
        bottlenecks=bottlenecks,
        simplifieds=simplifieds,
        residuals=residuals,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def drnc26(**kwargs):
    """
    DRN-C-26 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Thin wrapper around :func:`get_drn` with 26 blocks (C architecture).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=26, model_name="drnc26", **kwargs)
    return net
def drnc42(**kwargs):
    """
    DRN-C-42 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Thin wrapper around :func:`get_drn` with 42 blocks (C architecture).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=42, model_name="drnc42", **kwargs)
    return net
def drnc58(**kwargs):
    """
    DRN-C-58 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Thin wrapper around :func:`get_drn` with 58 blocks (C architecture).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=58, model_name="drnc58", **kwargs)
    return net
def drnd22(**kwargs):
    """
    DRN-D-22 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_drn(blocks=22, simplified=True, model_name="drnd22", **kwargs)
def drnd38(**kwargs):
    """
    DRN-D-38 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Thin wrapper around :func:`get_drn` with 38 blocks (simplified D architecture).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=38, simplified=True, model_name="drnd38", **kwargs)
    return net
def drnd54(**kwargs):
    """
    DRN-D-54 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Thin wrapper around :func:`get_drn` with 54 blocks (simplified D architecture).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=54, simplified=True, model_name="drnd54", **kwargs)
    return net
def drnd105(**kwargs):
    """
    DRN-D-105 model from 'Dilated Residual Networks,' https://arxiv.org/abs/1705.09914.

    Thin wrapper around :func:`get_drn` with 105 blocks (simplified D architecture).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_drn(blocks=105, simplified=True, model_name="drnd105", **kwargs)
    return net
def _test():
    # Smoke test: build every DRN variant, verify the trainable-parameter
    # count against known totals, and run one forward pass.
    import numpy as np
    import mxnet as mx
    pretrained = False
    expected_counts = {
        drnc26: 21126584,
        drnc42: 31234744,
        drnc58: 40542008,  # 41591608
        drnd22: 16393752,
        drnd38: 26501912,
        drnd54: 35809176,
        drnd105: 54801304,
    }
    for model in [drnc26, drnc42, drnc58, drnd22, drnd38, drnd54, drnd105]:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 22,644 | 31.35 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mixnet.py | """
MixNet for ImageNet-1K, implemented in Gluon.
Original paper: 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
"""
__all__ = ['MixNet', 'mixnet_s', 'mixnet_m', 'mixnet_l']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, get_activation_layer, split, conv1x1_block, conv3x3_block, dwconv3x3_block, SEBlock
class MixConv(HybridBlock):
    """
    Mixed convolution layer from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    Splits the input channel-wise and applies one convolution (with its own
    kernel size) per split, then concatenates the results.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 axis=1,
                 **kwargs):
        super(MixConv, self).__init__(**kwargs)
        # Normalize scalar kernel_size/padding to one-element lists.
        kernel_size = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        padding = padding if isinstance(padding, list) else [padding]
        kernel_count = len(kernel_size)
        self.splitted_in_channels = self.split_channels(in_channels, kernel_count)
        splitted_out_channels = self.split_channels(out_channels, kernel_count)
        self.axis = axis
        with self.name_scope():
            for i, kernel_size_i in enumerate(kernel_size):
                in_channels_i = self.splitted_in_channels[i]
                out_channels_i = splitted_out_channels[i]
                padding_i = padding[i]
                # Children are registered without attribute names; the forward
                # pass iterates them in registration order via self._children.
                self.register_child(
                    nn.Conv2D(
                        channels=out_channels_i,
                        kernel_size=kernel_size_i,
                        strides=strides,
                        padding=padding_i,
                        dilation=dilation,
                        # groups == out_channels means depthwise: each split
                        # stays depthwise over its own slice.
                        groups=(out_channels_i if out_channels == groups else groups),
                        use_bias=use_bias,
                        in_channels=in_channels_i))
    def hybrid_forward(self, F, x):
        xx = split(x, sizes=self.splitted_in_channels, axis=self.axis)
        out = [conv_i(x_i) for x_i, conv_i in zip(xx, self._children.values())]
        x = F.concat(*out, dim=self.axis)
        return x
    @staticmethod
    def split_channels(channels, kernel_count):
        # Even split; any remainder is added to the first chunk so the sizes
        # always sum to `channels`.
        splitted_channels = [channels // kernel_count] * kernel_count
        splitted_channels[0] += channels - sum(splitted_channels)
        return splitted_channels
class MixConvBlock(HybridBlock):
    """
    Mixed convolution block with Batch normalization and activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of int, or tuple/list of tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : function or str or None, default nn.Activation("relu")
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(MixConvBlock, self).__init__(**kwargs)
        # activation=None disables the activation sub-layer entirely.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.name_scope():
            self.conv = MixConv(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias)
            if self.use_bn:
                self.bn = nn.BatchNorm(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = get_activation_layer(activation)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def mixconv1x1_block(in_channels,
                     out_channels,
                     kernel_count,
                     strides=1,
                     groups=1,
                     use_bias=False,
                     use_bn=True,
                     bn_epsilon=1e-5,
                     bn_use_global_stats=False,
                     activation=(lambda: nn.Activation("relu")),
                     **kwargs):
    """
    Pointwise variant of the mixed convolution block: `kernel_count` parallel
    1x1 convolutions with zero padding.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_count : int
        Kernel count.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : function or str, or None, default nn.Activation("relu")
        Activation function or name of activation function.
    """
    # All branches use a 1x1 kernel, so padding is zero everywhere.
    kernel_sizes = [1] * kernel_count
    paddings = [0] * kernel_count
    return MixConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_sizes,
        strides=strides,
        padding=paddings,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        activation=activation,
        **kwargs)
class MixUnit(HybridBlock):
    """
    MixNet unit: optional expansion conv -> depthwise/mixed conv -> optional SE
    -> projection conv, with a residual connection when shapes allow.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    exp_kernel_count : int
        Expansion convolution kernel count for each unit.
    conv1_kernel_count : int
        Conv1 kernel count for each unit.
    conv2_kernel_count : int
        Conv2 kernel count for each unit.
    exp_factor : int
        Expansion factor for each unit.
    se_factor : int
        SE reduction factor for each unit.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activation : str
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 exp_kernel_count,
                 conv1_kernel_count,
                 conv2_kernel_count,
                 exp_factor,
                 se_factor,
                 bn_use_global_stats,
                 activation,
                 **kwargs):
        super(MixUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        assert (se_factor >= 0)
        # Residual add only when input and output shapes match exactly.
        self.residual = (in_channels == out_channels) and (strides == 1)
        self.use_se = se_factor > 0
        mid_channels = exp_factor * in_channels
        # exp_factor == 1 means no channel expansion, so the expansion conv
        # is skipped entirely.
        self.use_exp_conv = exp_factor > 1
        with self.name_scope():
            if self.use_exp_conv:
                # kernel_count == 1 falls back to a plain (non-mixed) conv.
                if exp_kernel_count == 1:
                    self.exp_conv = conv1x1_block(
                        in_channels=in_channels,
                        out_channels=mid_channels,
                        bn_use_global_stats=bn_use_global_stats,
                        activation=activation)
                else:
                    self.exp_conv = mixconv1x1_block(
                        in_channels=in_channels,
                        out_channels=mid_channels,
                        kernel_count=exp_kernel_count,
                        bn_use_global_stats=bn_use_global_stats,
                        activation=activation)
            if conv1_kernel_count == 1:
                self.conv1 = dwconv3x3_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            else:
                # Mixed depthwise conv with odd kernel sizes 3, 5, 7, ...;
                # padding grows with kernel size to preserve resolution.
                self.conv1 = MixConvBlock(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    kernel_size=[3 + 2 * i for i in range(conv1_kernel_count)],
                    strides=strides,
                    padding=[1 + i for i in range(conv1_kernel_count)],
                    groups=mid_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            if self.use_se:
                # Reduction is relative to in_channels: mid_channels /
                # (exp_factor * se_factor) == in_channels / se_factor.
                self.se = SEBlock(
                    channels=mid_channels,
                    reduction=(exp_factor * se_factor),
                    round_mid=False,
                    mid_activation=activation)
            # Projection conv is linear (no activation), as usual for
            # inverted-residual blocks.
            if conv2_kernel_count == 1:
                self.conv2 = conv1x1_block(
                    in_channels=mid_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            else:
                self.conv2 = mixconv1x1_block(
                    in_channels=mid_channels,
                    out_channels=out_channels,
                    kernel_count=conv2_kernel_count,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x)
        x = self.conv1(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x)
        if self.residual:
            x = x + identity
        return x
class MixInitBlock(HybridBlock):
    """
    MixNet stem: a stride-2 3x3 convolution followed by one non-expanding
    MixNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(MixInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            # Plain unit: single kernels everywhere, no expansion, no SE.
            self.conv2 = MixUnit(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=1,
                exp_kernel_count=1,
                conv1_kernel_count=1,
                conv2_kernel_count=1,
                exp_factor=1,
                se_factor=0,
                bn_use_global_stats=bn_use_global_stats,
                activation="relu")
    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class MixNet(HybridBlock):
    """
    MixNet model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    exp_kernel_counts : list of list of int
        Expansion convolution kernel count for each unit.
    conv1_kernel_counts : list of list of int
        Conv1 kernel count for each unit.
    conv2_kernel_counts : list of list of int
        Conv2 kernel count for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 exp_kernel_counts,
                 conv1_kernel_counts,
                 conv2_kernel_counts,
                 exp_factors,
                 se_factors,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(MixNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(MixInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Stages 0-2 downsample at their first unit; stage 3
                        # downsamples at its middle unit instead.
                        strides = 2 if ((j == 0) and (i != 3)) or\
                                       ((j == len(channels_per_stage) // 2) and (i == 3)) else 1
                        exp_kernel_count = exp_kernel_counts[i][j]
                        conv1_kernel_count = conv1_kernel_counts[i][j]
                        conv2_kernel_count = conv2_kernel_counts[i][j]
                        exp_factor = exp_factors[i][j]
                        se_factor = se_factors[i][j]
                        # First stage uses ReLU, all later stages use Swish.
                        activation = "relu" if i == 0 else "swish"
                        stage.add(MixUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            exp_kernel_count=exp_kernel_count,
                            conv1_kernel_count=conv1_kernel_count,
                            conv2_kernel_count=conv2_kernel_count,
                            exp_factor=exp_factor,
                            se_factor=se_factor,
                            bn_use_global_stats=bn_use_global_stats,
                            activation=activation))
                        in_channels = out_channels
                self.features.add(stage)
            # NOTE(review): `activation` here is the variable leaked from the
            # loop above (i.e. the last stage's activation, "swish" for any
            # non-empty `channels`) — presumably intentional; confirm.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=activation))
            in_channels = final_block_channels
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_mixnet(version,
               width_scale,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create MixNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MixNet ('s' or 'm').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit configuration tables (one inner list per stage).
    if version == "s":
        init_block_channels = 16
        channels = [[24, 24], [40, 40, 40, 40], [80, 80, 80], [120, 120, 120, 200, 200, 200]]
        exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 1, 1], [2, 2, 2, 1, 1, 1]]
        conv1_kernel_counts = [[1, 1], [3, 2, 2, 2], [3, 2, 2], [3, 4, 4, 5, 4, 4]]
        conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [2, 2, 2], [2, 2, 2, 1, 2, 2]]
        exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6], [6, 3, 3, 6, 6, 6]]
        se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4], [2, 2, 2, 2, 2, 2]]
    elif version == "m":
        init_block_channels = 24
        channels = [[32, 32], [40, 40, 40, 40], [80, 80, 80, 80], [120, 120, 120, 120, 200, 200, 200, 200]]
        exp_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 1, 1, 1]]
        conv1_kernel_counts = [[3, 1], [4, 2, 2, 2], [3, 4, 4, 4], [1, 4, 4, 4, 4, 4, 4, 4]]
        conv2_kernel_counts = [[2, 2], [1, 2, 2, 2], [1, 2, 2, 2], [1, 2, 2, 2, 1, 2, 2, 2]]
        exp_factors = [[6, 3], [6, 6, 6, 6], [6, 6, 6, 6], [6, 3, 3, 3, 6, 6, 6, 6]]
        se_factors = [[0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [2, 2, 2, 2, 2, 2, 2, 2]]
    else:
        raise ValueError("Unsupported MixNet version {}".format(version))
    final_block_channels = 1536
    # Width scaling is applied to all stages and the stem, but not to the
    # final 1536-channel block.
    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
    net = MixNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        exp_kernel_counts=exp_kernel_counts,
        conv1_kernel_counts=conv1_kernel_counts,
        conv2_kernel_counts=conv2_kernel_counts,
        exp_factors=exp_factors,
        se_factors=se_factors,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def mixnet_s(**kwargs):
    """
    MixNet-S model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(version="s", width_scale=1.0, model_name="mixnet_s")
    return get_mixnet(**config, **kwargs)
def mixnet_m(**kwargs):
    """
    MixNet-M model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(version="m", width_scale=1.0, model_name="mixnet_m")
    return get_mixnet(**config, **kwargs)
def mixnet_l(**kwargs):
    """
    MixNet-L model from 'MixConv: Mixed Depthwise Convolutional Kernels,' https://arxiv.org/abs/1907.09595.
    The L variant is the M architecture widened by a factor of 1.3.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(version="m", width_scale=1.3, model_name="mixnet_l")
    return get_mixnet(**config, **kwargs)
def _test():
    """Smoke-test the MixNet variants: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    expected_weight_counts = {
        mixnet_s: 4134606,
        mixnet_m: 5014382,
        mixnet_l: 7329252,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count trainable weights (skip shapeless/non-differentiable params).
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test only when executed as a script.
if __name__ == "__main__":
    _test()
| 23,494 | 35.826019 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/dabnet.py | """
DABNet for image segmentation, implemented in Gluon.
Original paper: 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1907.11357.
"""
__all__ = ['DABNet', 'dabnet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, conv3x3_block, ConvBlock, NormActivation, Concurrent, InterpolationBlock,\
DualPathSequential, PReLU2
class DwaConvBlock(HybridBlock):
    """
    Depthwise asymmetric separable convolution block: a (k, 1) depthwise
    convolution followed by a (1, k) depthwise convolution.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    kernel_size : int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(DwaConvBlock, self).__init__(**kwargs)
        # Settings shared by both depthwise convolutions.
        common_args = dict(
            in_channels=channels,
            out_channels=channels,
            strides=strides,
            groups=channels,
            use_bias=use_bias,
            use_bn=use_bn,
            bn_epsilon=bn_epsilon,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off,
            activation=activation)
        with self.name_scope():
            # Vertical (k x 1) depthwise convolution.
            self.conv1 = ConvBlock(
                kernel_size=(kernel_size, 1),
                padding=(padding, 0),
                dilation=(dilation, 1),
                **common_args)
            # Horizontal (1 x k) depthwise convolution.
            self.conv2 = ConvBlock(
                kernel_size=(1, kernel_size),
                padding=(0, padding),
                dilation=(1, dilation),
                **common_args)
    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
def dwa_conv3x3_block(channels,
                      strides=1,
                      padding=1,
                      dilation=1,
                      use_bias=False,
                      use_bn=True,
                      bn_epsilon=1e-5,
                      bn_use_global_stats=False,
                      bn_cudnn_off=False,
                      activation=(lambda: nn.Activation("relu")),
                      **kwargs):
    """
    Build a DwaConvBlock with a fixed 3x3 kernel size.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int, default 1
        Strides of the convolution.
    padding : int, default 1
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    cfg = dict(
        channels=channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return DwaConvBlock(**cfg, **kwargs)
class DABBlock(HybridBlock):
    """
    DABNet specific base block.
    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    dilation : int
        Dilation value for a dilated branch in the unit.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 channels,
                 dilation,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DABBlock, self).__init__(**kwargs)
        # The block narrows to half the channels internally.
        mid_channels = channels // 2
        with self.name_scope():
            # Pre-activation (BN + PReLU) before the first convolution.
            self.norm_activ1 = NormActivation(
                in_channels=channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(channels)))
            self.conv1 = conv3x3_block(
                in_channels=channels,
                out_channels=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(mid_channels)))
            # Two parallel depthwise-asymmetric branches: a plain one and a
            # dilated one; stack=True stacks their outputs on a new axis.
            self.branches = Concurrent(stack=True)
            self.branches.add(dwa_conv3x3_block(
                channels=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(mid_channels))))
            self.branches.add(dwa_conv3x3_block(
                channels=mid_channels,
                padding=dilation,
                dilation=dilation,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(mid_channels))))
            self.norm_activ2 = NormActivation(
                in_channels=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(mid_channels)))
            # 1x1 projection back to the input channel count.
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=channels)
    def hybrid_forward(self, F, x):
        identity = x
        x = self.norm_activ1(x)
        x = self.conv1(x)
        x = self.branches(x)
        # Summing over the stacked-branch axis fuses the two branches
        # (elementwise addition of the plain and dilated outputs).
        x = x.sum(axis=1)
        x = self.norm_activ2(x)
        x = self.conv2(x)
        # Residual connection.
        x = x + identity
        return x
class DownBlock(HybridBlock):
    """
    DABNet downsample block for the main branch: a stride-2 3x3 convolution,
    optionally concatenated with a max-pooled copy of the input when the
    channel count grows, followed by BN + PReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DownBlock, self).__init__(**kwargs)
        self.expand = (in_channels < out_channels)
        # When expanding, the pooled input supplies `in_channels` of the
        # output, so the convolution only produces the remainder.
        if self.expand:
            mid_channels = out_channels - in_channels
        else:
            mid_channels = out_channels
        with self.name_scope():
            self.conv = conv3x3(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2)
            if self.expand:
                self.pool = nn.MaxPool2D(
                    pool_size=2,
                    strides=2)
            self.norm_activ = NormActivation(
                in_channels=out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(out_channels)))
    def hybrid_forward(self, F, x):
        out = self.conv(x)
        if self.expand:
            out = F.concat(out, self.pool(x), dim=1)
        return self.norm_activ(out)
class DABUnit(HybridBlock):
    """
    DABNet unit: a DownBlock followed by a chain of DABBlocks, whose output
    is concatenated with the downsampled input.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilations : list of int
        Dilations for blocks.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilations,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DABUnit, self).__init__(**kwargs)
        # Half of the output channels come from the block chain, the other
        # half from the skip around it (concatenated in hybrid_forward).
        mid_channels = out_channels // 2
        with self.name_scope():
            self.down = DownBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.blocks = nn.HybridSequential(prefix="")
            for dilation in dilations:
                self.blocks.add(DABBlock(
                    channels=mid_channels,
                    dilation=dilation,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
    def hybrid_forward(self, F, x):
        x = self.down(x)
        return F.concat(self.blocks(x), x, dim=1)
class DABStage(HybridBlock):
    """
    DABNet stage.
    Parameters:
    ----------
    x_channels : int
        Number of input/output channels for x.
    y_in_channels : int
        Number of input channels for y.
    y_out_channels : int
        Number of output channels for y.
    dilations : list of int
        Dilations for blocks.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 x_channels,
                 y_in_channels,
                 y_out_channels,
                 dilations,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DABStage, self).__init__(**kwargs)
        # An empty dilation list means the stage has no DAB unit (first stage).
        self.use_unit = (len(dilations) > 0)
        with self.name_scope():
            self.x_down = nn.AvgPool2D(
                pool_size=3,
                strides=2,
                padding=1)
            if self.use_unit:
                # The unit contributes y_out_channels - x_channels channels;
                # the remaining x_channels come from the pooled-image branch.
                self.unit = DABUnit(
                    in_channels=y_in_channels,
                    out_channels=(y_out_channels - x_channels),
                    dilations=dilations,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            self.norm_activ = NormActivation(
                in_channels=y_out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(y_out_channels)))
    def hybrid_forward(self, F, y, x):
        # x carries the repeatedly average-pooled input image; bring it down
        # to this stage's resolution.
        x = self.x_down(x)
        if self.use_unit:
            y = self.unit(y)
        # Append the pooled-image channels to the feature branch.
        y = F.concat(y, x, dim=1)
        y = self.norm_activ(y)
        return y, x
class DABInitBlock(HybridBlock):
    """
    DABNet initial block: a stride-2 3x3 convolution followed by two more
    3x3 convolutions, each with BN + PReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(DABInitBlock, self).__init__(**kwargs)
        # Settings shared by all three convolutions.
        conv_kwargs = dict(
            bn_epsilon=bn_epsilon,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        with self.name_scope():
            # Only the first convolution downsamples.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                activation=(lambda: PReLU2(out_channels)),
                **conv_kwargs)
            self.conv2 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                activation=(lambda: PReLU2(out_channels)),
                **conv_kwargs)
            self.conv3 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                activation=(lambda: PReLU2(out_channels)),
                **conv_kwargs)
    def hybrid_forward(self, F, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x)
        return x
class DABNet(HybridBlock):
    """
    DABNet model from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1907.11357.
    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit (for y-branch).
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of list of int
        Dilations for blocks.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dilations,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 **kwargs):
        super(DABNet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # Input must be divisible by the total downsampling factor of 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        with self.name_scope():
            # Two-path body: path y carries features, path x carries the
            # progressively average-pooled input image.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=0)
            self.features.add(DABInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            y_in_channels = init_block_channels
            for i, (y_out_channels, dilations_i) in enumerate(zip(channels, dilations)):
                self.features.add(DABStage(
                    x_channels=in_channels,
                    y_in_channels=y_in_channels,
                    y_out_channels=y_out_channels,
                    dilations=dilations_i,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                y_in_channels = y_out_channels
            # 1x1 projection to per-class score maps at 1/8 resolution.
            self.classifier = conv1x1(
                in_channels=y_in_channels,
                out_channels=classes)
            # Upsample the score maps back to input resolution.
            self.up = InterpolationBlock(scale_factor=8)
    def hybrid_forward(self, F, x):
        # NOTE(review): reading x.shape requires imperative (non-hybridized)
        # execution when fixed_size is False.
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        y = self.features(x, x)
        y = self.classifier(y)
        y = self.up(y, in_size)
        return y
def get_dabnet(model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create DABNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    # Per-stage y-branch output channels; each value already includes the 3
    # extra channels concatenated from the pooled-image x-branch
    # (35 = 32 + 3, 131 = 128 + 3, 259 = 256 + 3).
    channels = [35, 131, 259]
    # The first stage has no DAB unit (empty dilation list).
    dilations = [[], [2, 2, 2], [4, 4, 8, 8, 16, 16]]
    bn_epsilon = 1e-3
    net = DABNet(
        channels=channels,
        init_block_channels=init_block_channels,
        dilations=dilations,
        bn_epsilon=bn_epsilon,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx,
            ignore_extra=True)
    return net
def dabnet_cityscapes(classes=19, **kwargs):
    """
    DABNet model for the Cityscapes dataset, from 'DABNet: Depth-wise Asymmetric Bottleneck for Real-time Semantic
    Segmentation,' https://arxiv.org/abs/1907.11357.
    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dabnet(model_name="dabnet_cityscapes", classes=classes, **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke-test DABNet: parameter count and output shape on Cityscapes-sized input."""
    import mxnet as mx
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19
    model_specs = [
        (dabnet_cityscapes, 756643),
    ]
    for model, expected_weight_count in model_specs:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)
        batch = 4
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        assert (y.shape == (batch, classes, in_size[0], in_size[1]))
# Run the smoke test only when executed as a script.
if __name__ == "__main__":
    _test()
| 22,095 | 32.682927 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/cgnet.py | """
CGNet for image segmentation, implemented in Gluon.
Original paper: 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
https://arxiv.org/abs/1811.08201.
"""
__all__ = ['CGNet', 'cgnet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import NormActivation, conv1x1, conv1x1_block, conv3x3_block, depthwise_conv3x3, SEBlock, Concurrent,\
DualPathSequential, InterpolationBlock, PReLU2
class CGBlock(HybridBlock):
    """
    CGNet block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dilation : int
        Dilation value.
    se_reduction : int
        SE-block reduction value.
    down : bool
        Whether to downsample.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 dilation,
                 se_reduction,
                 down,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(CGBlock, self).__init__(**kwargs)
        self.down = down
        # Channel bookkeeping: when downsampling, conv1 already produces
        # out_channels, the two depthwise branches double it, and conv2
        # projects back down; otherwise conv1 halves the channels so the
        # concatenated branches restore out_channels.
        if self.down:
            mid1_channels = out_channels
            mid2_channels = 2 * out_channels
        else:
            mid1_channels = out_channels // 2
            mid2_channels = out_channels
        with self.name_scope():
            if self.down:
                self.conv1 = conv3x3_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=2,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    activation=(lambda: PReLU2(out_channels)))
            else:
                self.conv1 = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid1_channels,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    activation=(lambda: PReLU2(mid1_channels)))
            # Two parallel depthwise 3x3 branches: a plain one and a dilated
            # one; their outputs are concatenated along the channel axis.
            self.branches = Concurrent()
            self.branches.add(depthwise_conv3x3(channels=mid1_channels))
            self.branches.add(depthwise_conv3x3(
                channels=mid1_channels,
                padding=dilation,
                dilation=dilation))
            self.norm_activ = NormActivation(
                in_channels=mid2_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(mid2_channels)))
            if self.down:
                self.conv2 = conv1x1(
                    in_channels=mid2_channels,
                    out_channels=out_channels)
            self.se = SEBlock(
                channels=out_channels,
                reduction=se_reduction,
                use_conv=False)
    def hybrid_forward(self, F, x):
        if not self.down:
            identity = x
        x = self.conv1(x)
        x = self.branches(x)
        x = self.norm_activ(x)
        if self.down:
            x = self.conv2(x)
        x = self.se(x)
        if not self.down:
            # Residual connection only in the non-downsampling case.
            x = x + identity
        return x
class CGUnit(HybridBlock):
    """
    CGNet unit: a downsampling CGBlock followed by a chain of non-downsampling
    CGBlocks, whose output is concatenated with the downsampled features.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    layers : int
        Number of layers.
    dilation : int
        Dilation value.
    se_reduction : int
        SE-block reduction value.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 layers,
                 dilation,
                 se_reduction,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(CGUnit, self).__init__(**kwargs)
        # Half of the output channels come from the block chain, the other
        # half from the skip around it (concatenated in hybrid_forward).
        mid_channels = out_channels // 2
        # Settings shared by every CGBlock in the unit.
        block_kwargs = dict(
            dilation=dilation,
            se_reduction=se_reduction,
            bn_epsilon=bn_epsilon,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        with self.name_scope():
            self.down = CGBlock(
                in_channels=in_channels,
                out_channels=mid_channels,
                down=True,
                **block_kwargs)
            self.blocks = nn.HybridSequential(prefix="")
            for _ in range(layers - 1):
                self.blocks.add(CGBlock(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    down=False,
                    **block_kwargs))
    def hybrid_forward(self, F, x):
        x = self.down(x)
        # Note: the concatenation order (block output first) differs from the
        # original reference implementation.
        return F.concat(self.blocks(x), x, dim=1)
class CGStage(HybridBlock):
    """
    CGNet stage.
    Parameters:
    ----------
    x_channels : int
        Number of input/output channels for x.
    y_in_channels : int
        Number of input channels for y.
    y_out_channels : int
        Number of output channels for y.
    layers : int
        Number of layers in the unit.
    dilation : int
        Dilation for blocks.
    se_reduction : int
        SE-block reduction value for blocks.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 x_channels,
                 y_in_channels,
                 y_out_channels,
                 layers,
                 dilation,
                 se_reduction,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(CGStage, self).__init__(**kwargs)
        # x_channels == 0 (cut_x flag off) disables the pooled-image branch;
        # layers == 0 disables the CG unit (used by the first stage).
        self.use_x = (x_channels > 0)
        self.use_unit = (layers > 0)
        with self.name_scope():
            if self.use_x:
                self.x_down = nn.AvgPool2D(
                    pool_size=3,
                    strides=2,
                    padding=1)
            if self.use_unit:
                # The unit contributes y_out_channels - x_channels channels;
                # the rest come from the pooled-image branch (when enabled).
                self.unit = CGUnit(
                    in_channels=y_in_channels,
                    out_channels=(y_out_channels - x_channels),
                    layers=layers,
                    dilation=dilation,
                    se_reduction=se_reduction,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            self.norm_activ = NormActivation(
                in_channels=y_out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=(lambda: PReLU2(y_out_channels)))
    def hybrid_forward(self, F, y, x=None):
        if self.use_unit:
            y = self.unit(y)
        if self.use_x:
            # Downscale the pooled input image and append its channels.
            x = self.x_down(x)
            y = F.concat(y, x, dim=1)
        y = self.norm_activ(y)
        return y, x
class CGInitBlock(HybridBlock):
    """
    CGNet initial block: a stride-2 3x3 convolution followed by two more
    3x3 convolutions, each with BN + PReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(CGInitBlock, self).__init__(**kwargs)
        # Settings shared by all three convolutions.
        conv_kwargs = dict(
            bn_epsilon=bn_epsilon,
            bn_use_global_stats=bn_use_global_stats,
            bn_cudnn_off=bn_cudnn_off)
        with self.name_scope():
            # Only the first convolution downsamples.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                activation=(lambda: PReLU2(out_channels)),
                **conv_kwargs)
            self.conv2 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                activation=(lambda: PReLU2(out_channels)),
                **conv_kwargs)
            self.conv3 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                activation=(lambda: PReLU2(out_channels)),
                **conv_kwargs)
    def hybrid_forward(self, F, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x)
        return x
class CGNet(HybridBlock):
    """
    CGNet model from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
    https://arxiv.org/abs/1811.08201.
    Parameters:
    ----------
    layers : list of int
        Number of layers for each unit.
    channels : list of int
        Number of output channels for each unit (for y-branch).
    init_block_channels : int
        Number of output channels for the initial unit.
    dilations : list of int
        Dilations for each unit.
    se_reductions : list of int
        SE-block reduction value for each unit.
    cut_x : list of int
        Whether to concatenate with x-branch for each unit.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 layers,
                 channels,
                 init_block_channels,
                 dilations,
                 se_reductions,
                 cut_x,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 **kwargs):
        super(CGNet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # Input must be divisible by the total downsampling factor of 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        with self.name_scope():
            # Two-path body: path y carries features, path x carries the
            # progressively average-pooled input image.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=0)
            self.features.add(CGInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            y_in_channels = init_block_channels
            for i, (layers_i, y_out_channels) in enumerate(zip(layers, channels)):
                self.features.add(CGStage(
                    x_channels=in_channels if cut_x[i] == 1 else 0,
                    y_in_channels=y_in_channels,
                    y_out_channels=y_out_channels,
                    layers=layers_i,
                    dilation=dilations[i],
                    se_reduction=se_reductions[i],
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                y_in_channels = y_out_channels
            # 1x1 projection to per-class score maps at 1/8 resolution.
            self.classifier = conv1x1(
                in_channels=y_in_channels,
                out_channels=classes)
            # Upsample the score maps back to input resolution.
            self.up = InterpolationBlock(scale_factor=8)
    def hybrid_forward(self, F, x):
        # NOTE(review): reading x.shape requires imperative (non-hybridized)
        # execution when fixed_size is False.
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        y = self.features(x, x)
        y = self.classifier(y)
        y = self.up(y, in_size)
        return y
def get_cgnet(model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create CGNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed CGNet configuration: stage depths, y-branch widths, dilations,
    # SE reductions and the stages that fuse the raw input (cut_x).
    net = CGNet(
        layers=[0, 3, 21],
        channels=[35, 131, 256],
        init_block_channels=32,
        dilations=[0, 2, 4],
        se_reductions=[0, 8, 16],
        cut_x=[1, 1, 0],
        bn_epsilon=1e-3,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(
            filename=weights_path,
            ctx=ctx,
            ignore_extra=True)

    return net
def cgnet_cityscapes(classes=19, **kwargs):
    """
    CGNet model for Cityscapes from 'CGNet: A Light-weight Context Guided Network for Semantic Segmentation,'
    https://arxiv.org/abs/1811.08201.

    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_cgnet(
        classes=classes,
        model_name="cgnet_cityscapes",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke-test the CGNet factory: parameter count and output shape."""
    import mxnet as mx

    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19

    for model in [cgnet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != cgnet_cityscapes or weight_count == 496306)
        batch = 4
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        assert (y.shape == (batch, classes, in_size[0], in_size[1]))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 17,360 | 32.068571 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/wrn1bit_cifar.py | """
WRN-1bit for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Training wide residual networks for deployment using a single bit for each weight,'
https://arxiv.org/abs/1802.08530.
"""
__all__ = ['CIFARWRN1bit', 'wrn20_10_1bit_cifar10', 'wrn20_10_1bit_cifar100', 'wrn20_10_1bit_svhn',
'wrn20_10_32bit_cifar10', 'wrn20_10_32bit_cifar100', 'wrn20_10_32bit_svhn']
import os
import math
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
class Binarize(mx.autograd.Function):
    """
    Fake sign op for 1-bit weights: forward emits scaled signs, backward is a
    straight-through identity.
    """
    def forward(self, x):
        # Scale the +/-1 signs by sqrt(2 / fan_in), where fan_in is the product
        # of the non-batch weight dims (in_channels * kH * kW).
        fan_in = x.shape[1] * x.shape[2] * x.shape[3]
        return math.sqrt(2.0 / fan_in) * x.sign()

    def backward(self, dy):
        # Straight-through estimator: gradient passes through unchanged.
        return dy
class Conv2D1bit(nn.Conv2D):
    """
    Standard convolution block with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding=1,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 binarized=False,
                 **kwargs):
        super(Conv2D1bit, self).__init__(
            in_channels=in_channels,
            channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            **kwargs)
        # When True, weights (and bias, if any) are binarized on every forward pass.
        self.binarized = binarized

    def hybrid_forward(self, F, x, weight, bias=None):
        # Binarize parameters on the fly; gradients flow straight through (see Binarize).
        weight_1bit = Binarize()(weight) if self.binarized else weight
        # NOTE(review): Binarize.forward indexes shape[1:4], which would fail for a
        # 1-D bias tensor; bias appears unused here since use_bias=False throughout
        # this file -- confirm before enabling use_bias with binarized=True.
        bias_1bit = Binarize()(bias) if bias is not None and self.binarized else bias
        return super(Conv2D1bit, self).hybrid_forward(F, x, weight=weight_1bit, bias=bias_1bit)
def conv1x1_1bit(in_channels,
                 out_channels,
                 strides=1,
                 groups=1,
                 use_bias=False,
                 binarized=False):
    """
    Convolution 1x1 layer with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    # BUGFIX: Conv2D1bit defaults to padding=1 (suited to 3x3 kernels). A pointwise
    # 1x1 convolution must use padding=0, otherwise each spatial dim grows by 2.
    return Conv2D1bit(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        groups=groups,
        use_bias=use_bias,
        binarized=binarized)
def conv3x3_1bit(in_channels,
                 out_channels,
                 strides=1,
                 padding=1,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 binarized=False):
    """
    Convolution 3x3 layer with binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    binarized : bool, default False
        Whether to use binarization.
    """
    return Conv2D1bit(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        binarized=binarized)
class ConvBlock1bit(HybridBlock):
    """
    Standard convolution block with Batch normalization and ReLU activation, and binarization.
    Layer order: conv -> BN -> (optional) ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 bn_affine=True,
                 bn_use_global_stats=False,
                 activate=True,
                 binarized=False,
                 **kwargs):
        super(ConvBlock1bit, self).__init__(**kwargs)
        self.activate = activate
        with self.name_scope():
            self.conv = Conv2D1bit(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                binarized=binarized)
            # bn_affine toggles both the learned shift (center/beta) and
            # scale (gamma) of the BatchNorm layer.
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                center=bn_affine,
                scale=bn_affine,
                use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1_block_1bit(in_channels,
                       out_channels,
                       strides=1,
                       padding=0,
                       groups=1,
                       use_bias=False,
                       bn_affine=True,
                       bn_use_global_stats=False,
                       activate=True,
                       binarized=False):
    """
    1x1 version of the standard conv + BN (+ ReLU) block with optional weight
    binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    block = ConvBlock1bit(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=padding,
        groups=groups,
        use_bias=use_bias,
        bn_affine=bn_affine,
        bn_use_global_stats=bn_use_global_stats,
        activate=activate,
        binarized=binarized)
    return block
class PreConvBlock1bit(HybridBlock):
    """
    Convolution block with Batch normalization and ReLU pre-activation, and binarization.
    Layer order: BN -> (optional) ReLU -> conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 bn_affine=True,
                 bn_use_global_stats=False,
                 return_preact=False,
                 activate=True,
                 binarized=False,
                 **kwargs):
        super(PreConvBlock1bit, self).__init__(**kwargs)
        self.return_preact = return_preact
        self.activate = activate
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                center=bn_affine,
                scale=bn_affine,
                use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = nn.Activation("relu")
            self.conv = Conv2D1bit(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                use_bias=use_bias,
                binarized=binarized)

    def hybrid_forward(self, F, x):
        x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        # Optionally expose the pre-activated tensor so callers (e.g. a residual
        # unit) can branch an identity shortcut off it.
        if self.return_preact:
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def pre_conv3x3_block_1bit(in_channels,
                           out_channels,
                           strides=1,
                           padding=1,
                           dilation=1,
                           bn_affine=True,
                           bn_use_global_stats=False,
                           return_preact=False,
                           activate=True,
                           binarized=False):
    """
    3x3 version of the pre-activated (BN -> ReLU -> conv) block with optional
    weight binarization.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation.
    activate : bool, default True
        Whether activate the convolution block.
    binarized : bool, default False
        Whether to use binarization.
    """
    block = PreConvBlock1bit(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        bn_affine=bn_affine,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        activate=activate,
        binarized=binarized)
    return block
class PreResBlock1bit(HybridBlock):
    """
    Simple PreResNet block for residual path in ResNet unit (with binarization).
    Two stacked pre-activated 3x3 convolutions; the first one may downsample.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 binarized=False,
                 **kwargs):
        super(PreResBlock1bit, self).__init__(**kwargs)
        with self.name_scope():
            # bn_affine=False: the BatchNorm layers carry no learned gamma/beta.
            self.conv1 = pre_conv3x3_block_1bit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_affine=False,
                bn_use_global_stats=bn_use_global_stats,
                return_preact=False,
                binarized=binarized)
            self.conv2 = pre_conv3x3_block_1bit(
                in_channels=out_channels,
                out_channels=out_channels,
                bn_affine=False,
                bn_use_global_stats=bn_use_global_stats,
                binarized=binarized)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class PreResUnit1bit(HybridBlock):
    """
    PreResNet unit with residual connection (with binarization).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    binarized : bool, default False
        Whether to use binarization.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 binarized=False,
                 **kwargs):
        super(PreResUnit1bit, self).__init__(**kwargs)
        # The shortcut only needs resizing when the body downsamples spatially.
        self.resize_identity = (strides != 1)
        with self.name_scope():
            self.body = PreResBlock1bit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                binarized=binarized)
            if self.resize_identity:
                self.identity_pool = nn.AvgPool2D(
                    pool_size=3,
                    strides=2,
                    padding=1)

    def hybrid_forward(self, F, x):
        identity = x
        x = self.body(x)
        if self.resize_identity:
            # Parameter-free shortcut: average-pool spatially, then double the
            # channel count by concatenating zeros (avoids a binarized 1x1
            # projection). Assumes out_channels == 2 * in_channels when
            # downsampling -- TODO confirm against the channel configuration.
            identity = self.identity_pool(identity)
            identity = F.concat(identity, F.zeros_like(identity), dim=1)
        x = x + identity
        return x
class PreResActivation(HybridBlock):
    """
    PreResNet pure pre-activation block (BN + ReLU) without convolution layer.
    It's used by itself as the final block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_affine : bool, default True
        Whether the BatchNorm layer learns affine parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_affine=True,
                 bn_use_global_stats=False,
                 **kwargs):
        super(PreResActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                center=bn_affine,
                scale=bn_affine,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        x = self.bn(x)
        x = self.activ(x)
        return x
class CIFARWRN1bit(HybridBlock):
    """
    WRN-1bit model for CIFAR from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    binarized : bool, default True
        Whether to use binarization.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 binarized=True,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARWRN1bit, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: a bare binarizable 3x3 conv (BN/ReLU come from the
            # pre-activated units that follow).
            self.features.add(conv3x3_1bit(
                in_channels=in_channels,
                out_channels=init_block_channels,
                binarized=binarized))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(PreResUnit1bit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            binarized=binarized))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_affine=False))
            # Head: 1x1 binarizable conv classifier followed by 8x8 average
            # pooling (a 32x32 input is 8x8 after the two stride-2 stages).
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1_block_1bit(
                in_channels=in_channels,
                out_channels=classes,
                activate=False,
                binarized=binarized))
            self.output.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output.add(nn.Flatten())

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_wrn1bit_cifar(classes,
                      blocks,
                      width_factor,
                      binarized=True,
                      model_name=None,
                      pretrained=False,
                      ctx=cpu(),
                      root=os.path.join("~", ".mxnet", "models"),
                      **kwargs):
    """
    Create WRN-1bit model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    width_factor : int
        Wide scale factor for width of layers.
    binarized : bool, default True
        Whether to use binarization.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Three stages with equal depth; each unit has 2 convs plus stem/head.
    assert ((blocks - 2) % 6 == 0)
    units_per_stage = (blocks - 2) // 6
    channels = [[base * width_factor] * units_per_stage for base in [16, 32, 64]]
    init_block_channels = 16 * width_factor

    net = CIFARWRN1bit(
        channels=channels,
        init_block_channels=init_block_channels,
        binarized=binarized,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def wrn20_10_1bit_cifar10(classes=10, **kwargs):
    """
    WRN-20-10-1bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        classes=classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_cifar10",
        **kwargs)
def wrn20_10_1bit_cifar100(classes=100, **kwargs):
    """
    WRN-20-10-1bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        classes=classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_cifar100",
        **kwargs)
def wrn20_10_1bit_svhn(classes=10, **kwargs):
    """
    WRN-20-10-1bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        classes=classes,
        blocks=20,
        width_factor=10,
        binarized=True,
        model_name="wrn20_10_1bit_svhn",
        **kwargs)
def wrn20_10_32bit_cifar10(classes=10, **kwargs):
    """
    WRN-20-10-32bit model for CIFAR-10 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        classes=classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_cifar10",
        **kwargs)
def wrn20_10_32bit_cifar100(classes=100, **kwargs):
    """
    WRN-20-10-32bit model for CIFAR-100 from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        classes=classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_cifar100",
        **kwargs)
def wrn20_10_32bit_svhn(classes=10, **kwargs):
    """
    WRN-20-10-32bit model for SVHN from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_wrn1bit_cifar(
        classes=classes,
        blocks=20,
        width_factor=10,
        binarized=False,
        model_name="wrn20_10_32bit_svhn",
        **kwargs)
def _test():
    """Smoke-test every WRN-1bit factory: parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    expected_counts = {
        wrn20_10_1bit_cifar10: 26737140,
        wrn20_10_1bit_cifar100: 26794920,
        wrn20_10_1bit_svhn: 26737140,
        wrn20_10_32bit_cifar10: 26737140,
        wrn20_10_32bit_cifar100: 26794920,
        wrn20_10_32bit_svhn: 26737140,
    }
    class_counts = {
        wrn20_10_1bit_cifar10: 10,
        wrn20_10_1bit_cifar100: 100,
        wrn20_10_1bit_svhn: 10,
        wrn20_10_32bit_cifar10: 10,
        wrn20_10_32bit_cifar100: 100,
        wrn20_10_32bit_svhn: 10,
    }

    for model, classes in class_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if p.shape is not None and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 28,170 | 32.657109 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/condensenet.py | """
CondenseNet for ImageNet-1K, implemented in Gluon.
Original paper: 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
https://arxiv.org/abs/1711.09224.
"""
__all__ = ['CondenseNet', 'condensenet74_c4_g4', 'condensenet74_c8_g8']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ChannelShuffle
class CondenseSimpleConv(HybridBlock):
    """
    CondenseNet specific simple convolution block.
    Pre-activation order: BN -> ReLU -> grouped conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups,
                 bn_use_global_stats,
                 **kwargs):
        super(CondenseSimpleConv, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                groups=groups,
                use_bias=False,
                in_channels=in_channels)

    def hybrid_forward(self, F, x):
        x = self.bn(x)
        x = self.activ(x)
        x = self.conv(x)
        return x
def condense_simple_conv3x3(in_channels,
                            out_channels,
                            groups,
                            bn_use_global_stats):
    """
    3x3 variant (stride 1, padding 1) of the CondenseNet simple convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    block = CondenseSimpleConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=1,
        padding=1,
        groups=groups,
        bn_use_global_stats=bn_use_global_stats)
    return block
class CondenseComplexConv(HybridBlock):
    """
    CondenseNet specific complex convolution block.
    Order: channel gather (via learned `index`) -> BN -> ReLU -> grouped conv -> channel shuffle.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups,
                 bn_use_global_stats,
                 **kwargs):
        super(CondenseComplexConv, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                groups=groups,
                use_bias=False,
                in_channels=in_channels)
            self.c_shuffle = ChannelShuffle(
                channels=out_channels,
                groups=groups)
            # Non-trainable (grad_req="null") channel-selection indices; the
            # values presumably come from the learned-group-convolution
            # condensation procedure stored in the checkpoint -- TODO confirm.
            self.index = self.params.get(
                "index",
                grad_req="null",
                shape=(in_channels,),
                init="zeros",
                allow_deferred_init=True,
                differentiable=False)

    def hybrid_forward(self, F, x, index):
        # Gather the retained input channels before the pre-activated conv.
        x = F.take(x, index, axis=1)
        x = self.bn(x)
        x = self.activ(x)
        x = self.conv(x)
        # Shuffle so that channels from different groups intermix downstream.
        x = self.c_shuffle(x)
        return x
def condense_complex_conv1x1(in_channels,
                             out_channels,
                             groups,
                             bn_use_global_stats):
    """
    1x1 variant (stride 1, padding 0) of the CondenseNet complex convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    block = CondenseComplexConv(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=1,
        padding=0,
        groups=groups,
        bn_use_global_stats=bn_use_global_stats)
    return block
class CondenseUnit(HybridBlock):
    """
    CondenseNet unit. DenseNet-style: the unit computes `out_channels - in_channels`
    new feature maps and concatenates them with its input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups,
                 bn_use_global_stats,
                 **kwargs):
        super(CondenseUnit, self).__init__(**kwargs)
        # The 1x1 bottleneck widens to 4x the number of newly produced channels.
        bottleneck_size = 4
        inc_channels = out_channels - in_channels
        mid_channels = inc_channels * bottleneck_size
        with self.name_scope():
            self.conv1 = condense_complex_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=groups,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = condense_simple_conv3x3(
                in_channels=mid_channels,
                out_channels=inc_channels,
                groups=groups,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        identity = x
        x = self.conv1(x)
        x = self.conv2(x)
        # Dense connectivity: append the new features to the unit's input.
        x = F.concat(identity, x, dim=1)
        return x
class TransitionBlock(HybridBlock):
    """
    CondenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the
    first unit of each stage. A plain 2x2 average pooling that halves the spatial
    resolution; unlike DenseNet's transition there is no convolution here.
    """
    def __init__(self,
                 **kwargs):
        super(TransitionBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = nn.AvgPool2D(
                pool_size=2,
                strides=2,
                padding=0)

    def hybrid_forward(self, F, x):
        x = self.pool(x)
        return x
class CondenseInitBlock(HybridBlock):
    """
    CondenseNet specific initial block: a single 3x3 stride-2 convolution
    (no BN/activation here -- the following blocks are pre-activated).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(CondenseInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=3,
                strides=2,
                padding=1,
                use_bias=False,
                in_channels=in_channels)

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        return x
class PostActivation(HybridBlock):
    """
    CondenseNet final block (BN + ReLU), which performs the same function of
    postactivation as in PreResNet.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(PostActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        x = self.bn(x)
        x = self.activ(x)
        return x
class CondenseDense(HybridBlock):
    """
    CondenseNet specific dense block.
    Parameters:
    ----------
    units : int
        Number of output channels.
    in_units : int
        Number of input channels.
    drop_rate : float
        Fraction of input channels for drop.
    """
    def __init__(self,
                 units,
                 in_units,
                 drop_rate=0.5,
                 **kwargs):
        super(CondenseDense, self).__init__(**kwargs)
        # Only a fixed fraction of the incoming features is actually consumed.
        drop_in_units = int(in_units * drop_rate)
        with self.name_scope():
            self.dense = nn.Dense(
                units=units,
                in_units=drop_in_units)
            # Non-trainable buffer holding the indices of the retained input
            # features (grad_req="null", differentiable=False); its values are
            # loaded with the pretrained weights rather than learned here.
            self.index = self.params.get(
                "index",
                grad_req="null",
                shape=(drop_in_units,),
                init="zeros",
                allow_deferred_init=True,
                differentiable=False)
    def hybrid_forward(self, F, x, index):
        # Gather the retained feature columns before the linear layer.
        x = F.take(x, index, axis=1)
        x = self.dense(x)
        return x
class CondenseNet(HybridBlock):
    """
    CondenseNet model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group Convolutions,'
    https://arxiv.org/abs/1711.09224.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(CondenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(CondenseInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    # Every stage except the first starts with a 2x2
                    # average-pooling transition block.
                    if i != 0:
                        stage.add(TransitionBlock())
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(CondenseUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            groups=groups,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Final BN + ReLU (post-activation), then 7x7 average pooling.
            self.features.add(PostActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            # Classifier with learned input-feature selection (CondenseDense).
            self.output.add(CondenseDense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_condensenet(num_layers,
                    groups=4,
                    model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create CondenseNet (converted) model with specific parameters.
    Parameters:
    ----------
    num_layers : int
        Number of layers.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if num_layers == 74:
        init_block_channels = 16
        layers = [4, 6, 8, 10, 8]
        growth_rates = [8, 16, 32, 64, 128]
    else:
        raise ValueError("Unsupported CondenseNet version with number of layers {}".format(num_layers))
    # Dense connectivity: within a stage, unit j outputs
    # prev_channels + growth_rate * (j + 1) channels, and the running channel
    # count carries over from the last unit of the previous stage.
    # (Replaces an equivalent but hard-to-read nested functools.reduce.)
    channels = []
    prev_channels = init_block_channels
    for num_units, growth_rate in zip(layers, growth_rates):
        stage_channels = [prev_channels + growth_rate * (j + 1) for j in range(num_units)]
        prev_channels = stage_channels[-1]
        channels.append(stage_channels)
    net = CondenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def condensenet74_c4_g4(**kwargs):
    """
    CondenseNet-74 (C=G=4) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group
    Convolutions,' https://arxiv.org/abs/1711.09224.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_condensenet(num_layers=74, groups=4, model_name="condensenet74_c4_g4", **kwargs)
    return net
def condensenet74_c8_g8(**kwargs):
    """
    CondenseNet-74 (C=G=8) model (converted) from 'CondenseNet: An Efficient DenseNet using Learned Group
    Convolutions,' https://arxiv.org/abs/1711.09224.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_condensenet(num_layers=74, groups=8, model_name="condensenet74_c8_g8", **kwargs)
    return net
def _test():
    """Smoke-test the CondenseNet models: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    expected_weights = {
        condensenet74_c4_g4: 4773944,
        condensenet74_c8_g8: 2935416,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        assert (weight_count == expected)
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 17,278 | 30.245931 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/crunet.py | """
CRU-Net, implemented in Gluon.
Original paper: 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural Networks,'
https://www.ijcai.org/proceedings/2018/88.
"""
__all__ = ['CRUNet', 'crunet56', 'crunet116']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import pre_conv1x1_block, pre_conv3x3_block
from .resnet import ResInitBlock
from .preresnet import PreResActivation
def cru_conv3x3(in_channels,
                out_channels,
                strides=1,
                padding=1,
                groups=1,
                use_bias=False,
                conv_params=None):
    """
    CRU-Net specific convolution 3x3 layer.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    conv_params : ParameterDict, default None
        Externally shared weights for the convolution layer (None means the
        layer owns its own parameters).
    """
    return nn.Conv2D(
        channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        groups=groups,
        use_bias=use_bias,
        in_channels=in_channels,
        params=conv_params)
class CRUConvBlock(HybridBlock):
    """
    CRU-Net specific convolution block: pre-activation (BN + ReLU) followed by
    a convolution whose weights may be shared with another block via
    `conv_params`.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor (PreResNet style).
    conv_params : ParameterDict, default None
        Weights for the convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups=1,
                 bn_use_global_stats=False,
                 return_preact=False,
                 conv_params=None,
                 **kwargs):
        super(CRUConvBlock, self).__init__(**kwargs)
        self.return_preact = return_preact
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                groups=groups,
                use_bias=False,
                in_channels=in_channels,
                params=conv_params)
    def hybrid_forward(self, F, x):
        x_pre_activ = self.activ(self.bn(x))
        out = self.conv(x_pre_activ)
        if self.return_preact:
            return out, x_pre_activ
        return out
def cru_conv1x1_block(in_channels,
                      out_channels,
                      strides=1,
                      bn_use_global_stats=False,
                      return_preact=False,
                      conv_params=None):
    """
    1x1 version of the CRU-Net specific convolution block (pre-activated
    pointwise convolution with optional weight sharing).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    conv_params : ParameterDict, default None
        Weights for the convolution layer.
    """
    block_kwargs = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        conv_params=conv_params)
    return CRUConvBlock(**block_kwargs)
class ResBottleneck(HybridBlock):
    """
    Pre-ResNeXt bottleneck block: pre-activated 1x1 -> grouped 3x3 -> 1x1.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(ResBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        # Scale the per-group channel count with the requested bottleneck
        # width (ResNeXt convention, normalized to a 64-wide baseline).
        per_group = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * per_group
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=group_width,
                out_channels=group_width,
                strides=strides,
                groups=cardinality,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        return self.conv3(self.conv2(self.conv1(x)))
class CRUBottleneck(HybridBlock):
    """
    CRU-Net bottleneck block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    group_width: int
        Group width parameter.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    conv1_params : ParameterDict, default None
        Weights for the convolution layer #1.
    conv2_params : ParameterDict, default None
        Weights for the convolution layer #2.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 group_width,
                 bn_use_global_stats,
                 conv1_params=None,
                 conv2_params=None,
                 **kwargs):
        super(CRUBottleneck, self).__init__(**kwargs)
        with self.name_scope():
            # conv1/conv2 may reuse weights from a sibling unit (collective
            # residual unit weight sharing) via conv1_params/conv2_params.
            self.conv1 = cru_conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats,
                conv_params=conv1_params)
            # Depthwise-style grouped 3x3 (groups equals channel count here).
            self.conv2 = cru_conv3x3(
                in_channels=group_width,
                out_channels=group_width,
                strides=strides,
                groups=group_width,
                conv_params=conv2_params)
            self.conv3 = pre_conv1x1_block(
                in_channels=group_width,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats)
            self.conv4 = pre_conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        return x
class ResUnit(HybridBlock):
    """
    CRU-Net residual unit: pre-activated ResNeXt bottleneck plus identity
    shortcut (projected when the shape changes).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(ResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        identity = self.identity_conv(x) if self.resize_identity else x
        return self.body(x) + identity
class CRUUnit(HybridBlock):
    """
    CRU-Net collective residual unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    group_width: int
        Group width parameter.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    conv1_params : ParameterDict, default None
        Weights for the convolution layer #1.
    conv2_params : ParameterDict, default None
        Weights for the convolution layer #2.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 group_width,
                 bn_use_global_stats,
                 conv1_params=None,
                 conv2_params=None,
                 **kwargs):
        super(CRUUnit, self).__init__(**kwargs)
        # Shared weights are only allowed for stride-1 units.
        assert (strides == 1) or ((conv1_params is None) and (conv2_params is None))
        self.resize_input = (in_channels != out_channels)
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if self.resize_input:
                self.input_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats)
            self.body = CRUBottleneck(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=strides,
                group_width=group_width,
                bn_use_global_stats=bn_use_global_stats,
                conv1_params=conv1_params,
                conv2_params=conv2_params)
            if self.resize_identity:
                # The shortcut projection reuses the input_conv weights.
                # NOTE(review): this assumes resize_identity implies
                # resize_input (i.e. strides != 1 never occurs with equal
                # channel counts, otherwise self.input_conv would not exist)
                # -- confirm against the network configurations.
                self.identity_conv = cru_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv_params=self.input_conv.conv.params)
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        if self.resize_input:
            x = self.input_conv(x)
        x = self.body(x)
        x = x + identity
        return x
class CRUNet(HybridBlock):
    """
    CRU-Net model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural Networks,'
    https://www.ijcai.org/proceedings/2018/88.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    group_widths: list of int
        List of group width parameters.
    refresh_steps: list of int
        List of refresh step parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 group_widths,
                 refresh_steps,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(CRUNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                group_width = group_widths[i]
                refresh_step = refresh_steps[i]
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # group_width != 0 selects CRU units with collective
                        # weight sharing for this stage; otherwise plain
                        # ResNeXt units are used.
                        if group_width != 0:
                            # Start a fresh shared-weight group at the stage
                            # beginning and then every `refresh_step` units
                            # (refresh_step == 0 means a single group).
                            if ((refresh_step == 0) and (j == 0)) or ((refresh_step != 0) and (j % refresh_step == 0)):
                                conv1_params = None
                                conv2_params = None
                            unit = CRUUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                group_width=group_width,
                                bn_use_global_stats=bn_use_global_stats,
                                conv1_params=conv1_params,
                                conv2_params=conv2_params)
                            if conv1_params is None:
                                # Capture this unit's conv1/conv2 weights so
                                # the following units of the group reuse them.
                                conv1_params = unit.body.conv1.conv.params
                                conv2_params = unit.body.conv2.params
                            stage.add(unit)
                        else:
                            stage.add(ResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                cardinality=cardinality,
                                bottleneck_width=bottleneck_width,
                                bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Final pre-activation (BN + ReLU), then 7x7 average pooling.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_crunet(blocks,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create CRU-Net model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    cardinality = 32
    bottleneck_width = 4
    # Per-depth configuration: (stage layouts, CRU group widths, refresh
    # steps); a group width of 0 disables CRU units for that stage.
    configs = {
        56: ([3, 4, 6, 3], [0, 0, 640, 0], [0, 0, 0, 0]),
        116: ([3, 6, 18, 3], [0, 352, 704, 0], [0, 0, 6, 0]),
    }
    if blocks not in configs:
        raise ValueError("Unsupported CRU-Net with number of blocks: {}".format(blocks))
    layers, group_widths, refresh_steps = configs[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = []
    for ci, li in zip(channels_per_layers, layers):
        channels.append([ci] * li)
    net = CRUNet(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        group_widths=group_widths,
        refresh_steps=refresh_steps,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def crunet56(**kwargs):
    """
    CRU-Net-56 model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural
    Networks,' https://www.ijcai.org/proceedings/2018/88.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_crunet(blocks=56, model_name="crunet56", **kwargs)
    return net
def crunet116(**kwargs):
    """
    CRU-Net-116 model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural
    Networks,' https://www.ijcai.org/proceedings/2018/88.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_crunet(blocks=116, model_name="crunet116", **kwargs)
    return net
def _test():
    """Smoke-test the CRU-Net models: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    expected_weights = {
        crunet56: 25609384,
        crunet116: 43656136,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 21,164 | 32.436019 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fbnet.py | """
FBNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,'
https://arxiv.org/abs/1812.03443.
"""
__all__ = ['FBNet', 'fbnet_cb']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block
class FBNetUnit(HybridBlock):
    """
    FBNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    exp_factor : int
        Expansion factor for each unit.
    activation : str, default 'relu'
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_epsilon,
                 bn_use_global_stats,
                 use_kernel3,
                 exp_factor,
                 activation="relu",
                 **kwargs):
        super(FBNetUnit, self).__init__(**kwargs)
        assert (exp_factor >= 1)
        # Residual shortcut only when the shape is preserved.
        self.residual = (in_channels == out_channels) and (strides == 1)
        # NOTE: always True in this implementation; kept as a flag for
        # structural parity with similar inverted-residual units.
        self.use_exp_conv = True
        mid_channels = exp_factor * in_channels
        with self.name_scope():
            if self.use_exp_conv:
                # 1x1 expansion convolution.
                self.exp_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            # Depthwise convolution: 3x3 or 5x5 per architecture search.
            if use_kernel3:
                self.conv1 = dwconv3x3_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            else:
                self.conv1 = dwconv5x5_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            # 1x1 projection convolution without activation.
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.residual:
            x = x + identity
        return x
class FBNetInitBlock(HybridBlock):
    """
    FBNet specific initial block: a strided 3x3 convolution followed by one
    non-expanding FBNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(FBNetInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = FBNetUnit(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=1,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                use_kernel3=True,
                exp_factor=1)
    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
class FBNet(HybridBlock):
    """
    FBNet model from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search,'
    https://arxiv.org/abs/1812.03443.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(FBNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(FBNetInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Every stage downsamples at its first unit.
                        strides = 2 if (j == 0) else 1
                        use_kernel3 = kernels3[i][j] == 1
                        exp_factor = exp_factors[i][j]
                        stage.add(FBNetUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_epsilon=bn_epsilon,
                            bn_use_global_stats=bn_use_global_stats,
                            use_kernel3=use_kernel3,
                            exp_factor=exp_factor))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 1x1 feature expansion, then 7x7 average pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = final_block_channels
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_fbnet(version,
              bn_epsilon=1e-5,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create FBNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of FBNet ('c' is the only one supported here).
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if version == "c":
        init_block_channels = 16
        final_block_channels = 1984
        channels = [[24, 24, 24], [32, 32, 32, 32], [64, 64, 64, 64, 112, 112, 112, 112], [184, 184, 184, 184, 352]]
        kernels3 = [[1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1]]
        exp_factors = [[6, 1, 1], [6, 3, 6, 6], [6, 3, 6, 6, 6, 6, 6, 3], [6, 6, 6, 6, 6]]
    else:
        raise ValueError("Unsupported FBNet version {}".format(version))
    net = FBNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        bn_epsilon=bn_epsilon,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def fbnet_cb(**kwargs):
    """
    FBNet-Cb model (bn_epsilon=1e-3) from 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural
    Architecture Search,' https://arxiv.org/abs/1812.03443.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_fbnet(version="c", bn_epsilon=1e-3, model_name="fbnet_cb", **kwargs)
    return net
def _test():
    """Smoke-test the FBNet models: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    expected_weights = {
        fbnet_cb: 5572200,
    }
    for model, expected in expected_weights.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 11,752 | 33.567647 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/visemenet.py | """
VisemeNet for speech-driven facial animation, implemented in Gluon.
Original paper: 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488.
"""
__all__ = ['VisemeNet', 'visemenet20']
import os
from mxnet import cpu
from mxnet.gluon import nn, rnn, HybridBlock
from .common import DenseBlock
class VisemeDenseBranch(HybridBlock):
    """
    VisemeNet dense branch: a stack of fully-connected blocks followed by a
    plain linear head. Returns both the head output and the last hidden
    representation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of middle/output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 **kwargs):
        super(VisemeDenseBranch, self).__init__(**kwargs)
        with self.name_scope():
            self.branch = nn.HybridSequential(prefix="")
            with self.branch.name_scope():
                # All entries but the last become DenseBlock layers; the last
                # entry sizes the final plain Dense head.
                for out_channels in out_channels_list[:-1]:
                    self.branch.add(DenseBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        use_bias=True,
                        use_bn=True))
                    in_channels = out_channels
            self.final_fc = nn.Dense(
                units=out_channels_list[-1],
                in_units=in_channels)
    def hybrid_forward(self, F, x):
        hidden = self.branch(x)
        return self.final_fc(hidden), hidden
class VisemeRnnBranch(HybridBlock):
    """
    VisemeNet RNN branch.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of middle/output channels.
    rnn_num_layers : int
        Number of RNN layers.
    dropout_rate : float
        Dropout rate.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 rnn_num_layers,
                 dropout_rate,
                 **kwargs):
        super(VisemeRnnBranch, self).__init__(**kwargs)
        with self.name_scope():
            self.rnn = rnn.LSTM(
                hidden_size=out_channels_list[0],
                num_layers=rnn_num_layers,
                dropout=dropout_rate,
                input_size=in_channels)
            self.fc_branch = VisemeDenseBranch(
                in_channels=out_channels_list[0],
                out_channels_list=out_channels_list[1:])
    def hybrid_forward(self, F, x):
        x = self.rnn(x)
        # Take the features at the last index along axis 1.
        # NOTE(review): this presumes batch-major ('NTC') outputs, i.e. the
        # last time step per sample; Gluon's LSTM defaults to 'TNC' layout --
        # confirm against the caller's input layout.
        x = x[:, -1, :]
        y, _ = self.fc_branch(x)
        return y
class VisemeNet(HybridBlock):
    """
    VisemeNet model from 'VisemeNet: Audio-Driven Animator-Centric Speech Animation,' https://arxiv.org/abs/1805.09488.

    Two-stage network: stage #1 encodes an audio window with an LSTM and
    predicts landmarks and phoneme-group features; stage #2 feeds sliding
    windows of stage-#1 outputs into three RNN branches (viseme
    classification, viseme regression, and a 2-value branch -- presumably
    JALI jaw/lip parameters, per the branch name).

    Parameters:
    ----------
    audio_features : int, default 195
        Number of audio features (characters/sounds).
    audio_window_size : int, default 8
        Size of audio window (for time related audio features).
    stage2_window_size : int, default 64
        Size of window for stage #2.
    num_face_ids : int, default 76
        Number of face IDs.
    num_landmarks : int, default 76
        Number of landmarks.
    num_phonemes : int, default 21
        Number of phonemes.
    num_visemes : int, default 20
        Number of visemes.
    dropout_rate : float, default 0.5
        Dropout rate for RNNs.
    """
    def __init__(self,
                 audio_features=195,
                 audio_window_size=8,
                 stage2_window_size=64,
                 num_face_ids=76,
                 num_landmarks=76,
                 num_phonemes=21,
                 num_visemes=20,
                 dropout_rate=0.5,
                 **kwargs):
        super(VisemeNet, self).__init__(**kwargs)
        stage1_rnn_hidden_size = 256
        stage1_fc_mid_channels = 256
        # Each stage-#2 window is folded into audio_window_size RNN steps, so
        # every step carries (features * stage2_window_size / audio_window_size)
        # values (see the reshape in hybrid_forward).
        stage2_rnn_in_features = (audio_features + num_landmarks + stage1_fc_mid_channels) * \
            stage2_window_size // audio_window_size
        self.audio_window_size = audio_window_size
        self.stage2_window_size = stage2_window_size
        with self.name_scope():
            # Stage #1: 3-layer LSTM over the raw audio features.
            self.stage1_rnn = rnn.LSTM(
                hidden_size=stage1_rnn_hidden_size,
                num_layers=3,
                dropout=dropout_rate,
                input_size=audio_features)
            # Stage-#1 heads, conditioned on the face-ID vector.
            self.lm_branch = VisemeDenseBranch(
                in_channels=(stage1_rnn_hidden_size + num_face_ids),
                out_channels_list=[stage1_fc_mid_channels, num_landmarks])
            self.ph_branch = VisemeDenseBranch(
                in_channels=(stage1_rnn_hidden_size + num_face_ids),
                out_channels_list=[stage1_fc_mid_channels, num_phonemes])
            # Stage-#2 heads over the windowed stage-#1 features.
            self.cls_branch = VisemeRnnBranch(
                in_channels=stage2_rnn_in_features,
                out_channels_list=[256, 200, num_visemes],
                rnn_num_layers=1,
                dropout_rate=dropout_rate)
            self.reg_branch = VisemeRnnBranch(
                in_channels=stage2_rnn_in_features,
                out_channels_list=[256, 200, 100, num_visemes],
                rnn_num_layers=3,
                dropout_rate=dropout_rate)
            self.jali_branch = VisemeRnnBranch(
                in_channels=stage2_rnn_in_features,
                out_channels_list=[128, 200, 2],
                rnn_num_layers=3,
                dropout_rate=dropout_rate)
    def hybrid_forward(self, F, x, pid):
        # Stage #1: encode the audio window, keep the last LSTM step and
        # concatenate the face-ID vector.
        y = self.stage1_rnn(x)
        y = y[:, -1, :]
        y = F.concat(y, pid, dim=1)
        lm, _ = self.lm_branch(y)
        # The landmark head's output is added to the face-ID vector.
        lm += pid
        ph, ph1 = self.ph_branch(y)
        # Stage-#2 per-step features: landmarks + phoneme mid-features + the
        # central frame of the raw audio window.
        z = F.concat(lm, ph1, dim=1)
        z2 = F.concat(z, x[:, self.audio_window_size // 2, :], dim=1)
        n_net2_input = z2.shape[1]
        # Zero-pad half a window at the front, then build overlapping windows
        # of length stage2_window_size, each reshaped into audio_window_size
        # RNN steps. NOTE(review): uses concrete shapes (z2.shape), so this
        # model cannot be hybridized.
        z2 = F.concat(
            F.zeros((self.stage2_window_size // 2, n_net2_input)),
            z2,
            dim=0)
        z = F.stack(
            *[z2[i:i + self.stage2_window_size].reshape(
                (self.audio_window_size, n_net2_input * self.stage2_window_size // self.audio_window_size))
                for i in range(z2.shape[0] - self.stage2_window_size)],
            axis=0)
        cls = self.cls_branch(z)
        reg = self.reg_branch(z)
        jali = self.jali_branch(z)
        return cls, reg, jali
def get_visemenet(model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create VisemeNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = VisemeNet(**kwargs)
    if pretrained:
        # An empty/None model name cannot be resolved to a weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_file, ctx=ctx)
    return net
def visemenet20(**kwargs):
    """
    VisemeNet model for 20 visemes (without co-articulation rules) from 'VisemeNet: Audio-Driven Animator-Centric
    Speech Animation,' https://arxiv.org/abs/1805.09488.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_visemenet(
        model_name="visemenet20",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke test: build the model, check its size and output shapes."""
    import mxnet as mx

    pretrained = False

    for model in [visemenet20]:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != visemenet20 or weight_count == 14574303)

        batch = 34
        audio_window_size = 8
        audio_features = 195
        num_face_ids = 76
        num_visemes = 20
        x = mx.nd.random.normal(shape=(batch, audio_window_size, audio_features), ctx=ctx)
        pid = mx.nd.full(shape=(batch, num_face_ids), val=3, ctx=ctx)
        y1, y2, y3 = net(x, pid)
        assert (y1.shape[0] == y2.shape[0] == y3.shape[0])
        assert (y1.shape[1] == y2.shape[1] == num_visemes)
        assert (y3.shape[1] == 2)


if __name__ == "__main__":
    _test()
| 9,324 | 31.155172 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fractalnet_cifar.py | """
FractalNet for CIFAR, implemented in Gluon.
Original paper: 'FractalNet: Ultra-Deep Neural Networks without Residuals,' https://arxiv.org/abs/1605.07648.
"""
__all__ = ['CIFARFractalNet', 'fractalnet_cifar10', 'fractalnet_cifar100']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ParametricSequential
class DropConvBlock(HybridBlock):
    """
    Convolution block with Batch normalization, ReLU activation, and Dropout layer.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_prob : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 use_bias=False,
                 bn_use_global_stats=False,
                 dropout_prob=0.0,
                 **kwargs):
        super(DropConvBlock, self).__init__(**kwargs)
        # The Dropout layer is only instantiated for a non-zero probability.
        self.use_dropout = (dropout_prob != 0.0)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=use_bias,
                in_channels=in_channels)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_prob)
    def hybrid_forward(self, F, x):
        # Conv -> BN -> ReLU -> (optional) Dropout.
        x = self.conv(x)
        x = self.bn(x)
        x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x
def drop_conv3x3_block(in_channels,
                       out_channels,
                       strides=1,
                       padding=1,
                       use_bias=False,
                       bn_use_global_stats=False,
                       dropout_prob=0.0,
                       **kwargs):
    """
    3x3 version of the convolution block with dropout.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_prob : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    return DropConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        bn_use_global_stats=bn_use_global_stats,
        dropout_prob=dropout_prob,
        **kwargs)
class FractalBlock(HybridBlock):
    """
    FractalNet block.

    The block is organized as `2 ** (num_columns - 1)` sequential levels.
    Column `j` contributes a 3x3 conv at level `i` whenever `(i + 1)` is a
    multiple of `2 ** j`; at every level the outputs of the active columns
    are joined by a (drop-path masked) mean.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    num_columns : int
        Number of columns in each block.
    loc_drop_prob : float
        Local drop path probability.
    dropout_prob : float
        Probability of dropout.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_columns,
                 loc_drop_prob,
                 dropout_prob,
                 **kwargs):
        super(FractalBlock, self).__init__(**kwargs)
        assert (num_columns >= 1)
        self.num_columns = num_columns
        self.loc_drop_prob = loc_drop_prob
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            depth = 2 ** (num_columns - 1)
            for i in range(depth):
                level_block_i = nn.HybridSequential(prefix='block{}_'.format(i + 1))
                for j in range(self.num_columns):
                    # Column j is active at level i iff (i + 1) % 2**j == 0.
                    column_step_j = 2 ** j
                    if (i + 1) % column_step_j == 0:
                        # A column's first conv sees the block input; later
                        # convs see the joined (out_channels-wide) activation.
                        in_channels_ij = in_channels if (i + 1 == column_step_j) else out_channels
                        level_block_i.add(drop_conv3x3_block(
                            in_channels=in_channels_ij,
                            out_channels=out_channels,
                            dropout_prob=dropout_prob))
                self.blocks.add(level_block_i)
    @staticmethod
    def calc_drop_mask(batch_size,
                       glob_num_columns,
                       curr_num_columns,
                       max_num_columns,
                       loc_drop_prob):
        """
        Calculate drop path mask.
        Parameters:
        ----------
        batch_size : int
            Size of batch.
        glob_num_columns : np.array
            Column indices (one per sample) for the global drop-path part of
            the batch.
        curr_num_columns : int
            Number of active columns in the current level of block.
        max_num_columns : int
            Number of columns for all network.
        loc_drop_prob : float
            Local drop path probability.
        Returns:
        -------
        np.array
            Resulted mask of shape (curr_num_columns, batch_size).
        """
        # Global part: exactly one column survives per sample -- the globally
        # chosen one, remapped into this level's set of active columns (samples
        # whose chosen column is inactive here get an all-zero mask column).
        glob_batch_size = glob_num_columns.shape[0]
        glob_drop_mask = np.zeros((curr_num_columns, glob_batch_size), dtype=np.float32)
        glob_drop_num_columns = glob_num_columns - (max_num_columns - curr_num_columns)
        glob_drop_indices = np.where(glob_drop_num_columns >= 0)[0]
        glob_drop_mask[glob_drop_num_columns[glob_drop_indices], glob_drop_indices] = 1.0
        # Local part: drop each column independently with loc_drop_prob, then
        # revive one random column for samples where all columns were dropped.
        loc_batch_size = batch_size - glob_batch_size
        loc_drop_mask = np.random.binomial(
            n=1,
            p=(1.0 - loc_drop_prob),
            size=(curr_num_columns, loc_batch_size)).astype(np.float32)
        alive_count = loc_drop_mask.sum(axis=0)
        dead_indices = np.where(alive_count == 0.0)[0]
        loc_drop_mask[np.random.randint(0, curr_num_columns, size=dead_indices.shape), dead_indices] = 1.0
        drop_mask = np.concatenate((glob_drop_mask, loc_drop_mask), axis=1)
        return drop_mask
    @staticmethod
    def join_outs(F,
                  raw_outs,
                  glob_num_columns,
                  num_columns,
                  loc_drop_prob,
                  training):
        """
        Join outputs for current level of block.
        Parameters:
        ----------
        F : namespace
            Symbol or NDArray namespace.
        raw_outs : list of Tensor
            Current outputs from active columns.
        glob_num_columns : np.array
            Column indices (one per sample) for the global drop-path part of
            the batch.
        num_columns : int
            Number of columns for all network.
        loc_drop_prob : float
            Local drop path probability.
        training : bool
            Whether training mode for network.
        Returns:
        -------
        NDArray
            Joined output.
        """
        curr_num_columns = len(raw_outs)
        out = F.stack(*raw_outs, axis=0)
        assert (out.shape[0] == curr_num_columns)
        if training:
            # Drop-path: zero out masked columns and average over survivors
            # only (samples with no survivors divide by 1, leaving zeros).
            batch_size = out.shape[1]
            batch_mask = FractalBlock.calc_drop_mask(
                batch_size=batch_size,
                glob_num_columns=glob_num_columns,
                curr_num_columns=curr_num_columns,
                max_num_columns=num_columns,
                loc_drop_prob=loc_drop_prob)
            batch_mask = mx.nd.array(batch_mask, ctx=out.context)
            assert (batch_mask.shape[0] == curr_num_columns)
            assert (batch_mask.shape[1] == batch_size)
            batch_mask = batch_mask.expand_dims(2).expand_dims(3).expand_dims(4)
            masked_out = out * batch_mask
            num_alive = batch_mask.sum(axis=0).asnumpy()
            num_alive[num_alive == 0.0] = 1.0
            num_alive = mx.nd.array(num_alive, ctx=out.context)
            out = masked_out.sum(axis=0) / num_alive
        else:
            # Inference: plain mean over the active columns.
            out = out.mean(axis=0)
        return out
    def hybrid_forward(self, F, x, glob_num_columns):
        # outs[j] carries the activation currently flowing through column j.
        outs = [x] * self.num_columns
        for level_block_i in self.blocks._children.values():
            outs_i = []
            for j, block_ij in enumerate(level_block_i._children.values()):
                input_i = outs[j]
                outs_i.append(block_ij(input_i))
            # Column outputs are joined in reverse registration order.
            joined_out = FractalBlock.join_outs(
                F=F,
                raw_outs=outs_i[::-1],
                glob_num_columns=glob_num_columns,
                num_columns=self.num_columns,
                loc_drop_prob=self.loc_drop_prob,
                training=mx.autograd.is_training())
            # All columns active at this level continue from the joined value.
            len_level_block_i = len(level_block_i._children.values())
            for j in range(len_level_block_i):
                outs[j] = joined_out
        return outs[0]
class FractalUnit(HybridBlock):
    """
    FractalNet unit: a fractal block followed by 2x2 max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    num_columns : int
        Number of columns in each block.
    loc_drop_prob : float
        Local drop path probability.
    dropout_prob : float
        Probability of dropout.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 num_columns,
                 loc_drop_prob,
                 dropout_prob,
                 **kwargs):
        super(FractalUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.block = FractalBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                num_columns=num_columns,
                loc_drop_prob=loc_drop_prob,
                dropout_prob=dropout_prob)
            self.pool = nn.MaxPool2D(
                pool_size=2,
                strides=2)

    def hybrid_forward(self, F, x, glob_num_columns):
        return self.pool(self.block(x, glob_num_columns))
class CIFARFractalNet(HybridBlock):
    """
    FractalNet model for CIFAR from 'FractalNet: Ultra-Deep Neural Networks without Residuals,'
    https://arxiv.org/abs/1605.07648.
    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit.
    num_columns : int
        Number of columns in each block.
    dropout_probs : list of float
        Probability of dropout in each block.
    loc_drop_prob : float
        Local drop path probability.
    glob_drop_ratio : float
        Global drop part fraction.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 num_columns,
                 dropout_probs,
                 loc_drop_prob,
                 glob_drop_ratio,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARFractalNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.glob_drop_ratio = glob_drop_ratio
        self.num_columns = num_columns
        with self.name_scope():
            # ParametricSequential forwards the extra glob_num_columns argument
            # through every FractalUnit.
            self.features = ParametricSequential(prefix="")
            for i, out_channels in enumerate(channels):
                dropout_prob = dropout_probs[i]
                self.features.add(FractalUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    num_columns=num_columns,
                    loc_drop_prob=loc_drop_prob,
                    dropout_prob=dropout_prob))
                in_channels = out_channels
            self.output = nn.Dense(
                units=classes,
                in_units=in_channels)
    def hybrid_forward(self, F, x):
        # The first glob_drop_ratio fraction of the batch gets globally sampled
        # drop-path (one surviving column per sample); the rest uses local
        # drop-path inside FractalBlock. NOTE(review): uses x.shape and numpy
        # RNG, so this network is not hybridizable.
        glob_batch_size = int(x.shape[0] * self.glob_drop_ratio)
        glob_num_columns = np.random.randint(0, self.num_columns, size=(glob_batch_size,))
        x = self.features(x, glob_num_columns)
        x = self.output(x)
        return x
def get_fractalnet_cifar(num_classes,
                         model_name=None,
                         pretrained=False,
                         ctx=cpu(),
                         root=os.path.join("~", ".mxnet", "models"),
                         **kwargs):
    """
    Create FractalNet model for CIFAR with specific parameters.
    Parameters:
    ----------
    num_classes : int
        Number of classification classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    dropout_probs = (0.0, 0.1, 0.2, 0.3, 0.4)
    # Stage widths 64/128/256/512/512 (the last stage repeats the previous width).
    channels = [64 * (2 ** (i if i != len(dropout_probs) - 1 else i - 1)) for i in range(len(dropout_probs))]
    num_columns = 3
    loc_drop_prob = 0.15
    glob_drop_ratio = 0.5
    net = CIFARFractalNet(
        channels=channels,
        num_columns=num_columns,
        dropout_probs=dropout_probs,
        loc_drop_prob=loc_drop_prob,
        glob_drop_ratio=glob_drop_ratio,
        classes=num_classes,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def fractalnet_cifar10(num_classes=10, **kwargs):
    """
    FractalNet model for CIFAR-10 from 'FractalNet: Ultra-Deep Neural Networks without Residuals,'
    https://arxiv.org/abs/1605.07648.

    Parameters:
    ----------
    num_classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_fractalnet_cifar(
        num_classes=num_classes,
        model_name="fractalnet_cifar10",
        **kwargs)
def fractalnet_cifar100(num_classes=100, **kwargs):
    """
    FractalNet model for CIFAR-100 from 'FractalNet: Ultra-Deep Neural Networks without Residuals,'
    https://arxiv.org/abs/1605.07648.

    Parameters:
    ----------
    num_classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_fractalnet_cifar(
        num_classes=num_classes,
        model_name="fractalnet_cifar100",
        **kwargs)
def _test():
    """Smoke test: build both CIFAR variants, check sizes and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model, classes in [(fractalnet_cifar10, 10), (fractalnet_cifar100, 100)]:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = 0
        for param in net.collect_params().values():
            # Count only shaped, differentiable parameters.
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != fractalnet_cifar10 or weight_count == 33724618)
        assert (model != fractalnet_cifar100 or weight_count == 33770788)

        x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
        y = net(x)
        # with mx.autograd.record():
        #     y = net(x)
        #     y.backward()
        assert (y.shape == (14, classes))


if __name__ == "__main__":
    _test()
| 17,294 | 32.195777 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mobilenetv3.py | """
MobileNetV3 for ImageNet-1K, implemented in Gluon.
Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
"""
__all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4',
'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2',
'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block, SEBlock,\
HSwish
class MobileNetV3Unit(HybridBlock):
    """
    MobileNetV3 unit: an inverted-residual block with an optional 1x1
    expansion, a 3x3 or 5x5 depthwise convolution, an optional
    squeeze-and-excitation block, and a linear 1x1 projection. An identity
    shortcut is used when the input/output shapes match.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    exp_channels : int
        Number of middle (expanded) channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    activation : str
        Activation function or name of activation function.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 exp_channels,
                 strides,
                 use_kernel3,
                 activation,
                 use_se,
                 bn_use_global_stats=False,
                 **kwargs):
        super(MobileNetV3Unit, self).__init__(**kwargs)
        assert (exp_channels >= out_channels)
        # Identity shortcut only when channels and resolution are preserved.
        self.residual = (in_channels == out_channels) and (strides == 1)
        self.use_se = use_se
        # Skip the 1x1 expansion when no channel expansion is requested.
        self.use_exp_conv = exp_channels != out_channels
        mid_channels = exp_channels
        with self.name_scope():
            if self.use_exp_conv:
                self.exp_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            if use_kernel3:
                self.conv1 = dwconv3x3_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            else:
                self.conv1 = dwconv5x5_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=activation)
            if self.use_se:
                self.se = SEBlock(
                    channels=mid_channels,
                    reduction=4,
                    round_mid=True,
                    out_activation="hsigmoid")
            # Linear bottleneck: no activation after the projection.
            self.conv2 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        if self.use_exp_conv:
            x = self.exp_conv(x)
        x = self.conv1(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv2(x)
        if self.residual:
            x = x + identity
        return x
class MobileNetV3FinalBlock(HybridBlock):
    """
    MobileNetV3 final block: a 1x1 conv with HSwish activation, optionally
    followed by a squeeze-and-excitation block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_se,
                 bn_use_global_stats=False,
                 **kwargs):
        super(MobileNetV3FinalBlock, self).__init__(**kwargs)
        self.use_se = use_se
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation="hswish")
            if self.use_se:
                self.se = SEBlock(
                    channels=out_channels,
                    reduction=4,
                    round_mid=True,
                    out_activation="hsigmoid")

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        return self.se(x) if self.use_se else x
class MobileNetV3Classifier(HybridBlock):
    """
    MobileNetV3 classifier head: a 1x1 conv with HSwish, optional dropout,
    and a final biased 1x1 conv producing per-class scores.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 dropout_rate,
                 **kwargs):
        super(MobileNetV3Classifier, self).__init__(**kwargs)
        # Dropout is only instantiated for a non-zero rate.
        self.use_dropout = (dropout_rate != 0.0)
        with self.name_scope():
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.activ = HSwish()
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)

    def hybrid_forward(self, F, x):
        x = self.activ(self.conv1(x))
        if self.use_dropout:
            x = self.dropout(x)
        return self.conv2(x)
class MobileNetV3(HybridBlock):
    """
    MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    exp_channels : list of list of int
        Number of middle (expanded) channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    use_relu : list of list of int/bool
        Using ReLU activation flag for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    final_use_se : bool
        Whether to use SE-module in the final block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(MobileNetV3, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: strided 3x3 conv with HSwish.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                activation="hswish"))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        exp_channels_ij = exp_channels[i][j]
                        # The first unit of each stage downsamples, except in
                        # stage 1 unless first_stride is set.
                        strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
                        use_kernel3 = kernels3[i][j] == 1
                        activation = "relu" if use_relu[i][j] == 1 else "hswish"
                        use_se_flag = use_se[i][j] == 1
                        stage.add(MobileNetV3Unit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            exp_channels=exp_channels_ij,
                            use_kernel3=use_kernel3,
                            strides=strides,
                            activation=activation,
                            use_se=use_se_flag,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(MobileNetV3FinalBlock(
                in_channels=in_channels,
                out_channels=final_block_channels,
                use_se=final_use_se,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = final_block_channels
            # pool_size=7 assumes a 7x7 final feature map (224x224 input) --
            # TODO confirm behavior for other in_size values.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(MobileNetV3Classifier(
                in_channels=in_channels,
                out_channels=classes,
                mid_channels=classifier_mid_channels,
                dropout_rate=0.2))
            self.output.add(nn.Flatten())
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_mobilenetv3(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create MobileNetV3 model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MobileNetV3 ('small' or 'large').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-unit configuration tables: output/expansion channels, kernel choice
    # (1 -> 3x3, 0 -> 5x5), activation (1 -> ReLU, 0 -> HSwish) and SE usage.
    if version == "small":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]]
        exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]]
        first_stride = True
        final_block_channels = 576
    elif version == "large":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]]
        kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]]
        first_stride = False
        final_block_channels = 960
    else:
        raise ValueError("Unsupported MobileNetV3 version {}".format(version))
    final_use_se = False
    classifier_mid_channels = 1280
    # Width scaling rounds every unit's channels; the final block only grows
    # for scales > 1.
    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
        if width_scale > 1.0:
            final_block_channels = round_channels(final_block_channels * width_scale)
    net = MobileNetV3(
        channels=channels,
        exp_channels=exp_channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        use_relu=use_relu,
        use_se=use_se,
        first_stride=first_stride,
        final_use_se=final_use_se,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def mobilenetv3_small_w7d20(**kwargs):
    """
    MobileNetV3 Small 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=0.35,
        model_name="mobilenetv3_small_w7d20",
        **kwargs)
def mobilenetv3_small_wd2(**kwargs):
    """
    MobileNetV3 Small 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=0.5,
        model_name="mobilenetv3_small_wd2",
        **kwargs)
def mobilenetv3_small_w3d4(**kwargs):
    """
    MobileNetV3 Small 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=0.75,
        model_name="mobilenetv3_small_w3d4",
        **kwargs)
def mobilenetv3_small_w1(**kwargs):
    """
    MobileNetV3 Small 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=1.0,
        model_name="mobilenetv3_small_w1",
        **kwargs)
def mobilenetv3_small_w5d4(**kwargs):
    """
    MobileNetV3 Small 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_mobilenetv3(
        version="small",
        width_scale=1.25,
        model_name="mobilenetv3_small_w5d4",
        **kwargs)
def mobilenetv3_large_w7d20(**kwargs):
    """
    MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fix: the original passed model_name="mobilenetv3_small_w7d20", which
    # would resolve pretrained weights for the *small* variant while building
    # the large architecture (shape mismatch / wrong weights on load).
    return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs)
def mobilenetv3_large_wd2(**kwargs):
    """
    MobileNetV3 Large 224/0.5 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Halved-width "large" architecture (width multiplier 0.5).
    return get_mobilenetv3(version="large", width_scale=0.5, model_name="mobilenetv3_large_wd2", **kwargs)
def mobilenetv3_large_w3d4(**kwargs):
    """
    MobileNetV3 Large 224/0.75 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Reduced-width "large" architecture (width multiplier 0.75).
    return get_mobilenetv3(version="large", width_scale=0.75, model_name="mobilenetv3_large_w3d4", **kwargs)
def mobilenetv3_large_w1(**kwargs):
    """
    MobileNetV3 Large 224/1.0 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Baseline "large" architecture (width multiplier 1.0).
    return get_mobilenetv3(version="large", width_scale=1.0, model_name="mobilenetv3_large_w1", **kwargs)
def mobilenetv3_large_w5d4(**kwargs):
    """
    MobileNetV3 Large 224/1.25 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Widened "large" architecture (width multiplier 1.25).
    return get_mobilenetv3(version="large", width_scale=1.25, model_name="mobilenetv3_large_w5d4", **kwargs)
def _test():
    """Smoke-test every MobileNetV3 variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter count for each width/size variant.
    expected_weight_counts = {
        mobilenetv3_small_w7d20: 2159600,
        mobilenetv3_small_wd2: 2288976,
        mobilenetv3_small_w3d4: 2581312,
        mobilenetv3_small_w1: 2945288,
        mobilenetv3_small_w5d4: 3643632,
        mobilenetv3_large_w7d20: 2943080,
        mobilenetv3_large_wd2: 3334896,
        mobilenetv3_large_w3d4: 4263496,
        mobilenetv3_large_w1: 5481752,
        mobilenetv3_large_w5d4: 7459144,
    }
    ctx = mx.cpu()
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_count
        # Forward pass at the nominal 224x224 input resolution.
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 21,706 | 35.238731 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diaresnet.py | """
DIA-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['DIAResNet', 'diaresnet10', 'diaresnet12', 'diaresnet14', 'diaresnetbc14b', 'diaresnet16', 'diaresnet18',
'diaresnet26', 'diaresnetbc26b', 'diaresnet34', 'diaresnetbc38b', 'diaresnet50', 'diaresnet50b',
'diaresnet101', 'diaresnet101b', 'diaresnet152', 'diaresnet152b', 'diaresnet200', 'diaresnet200b',
'DIAAttention', 'DIAResUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, DualPathSequential
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class FirstLSTMAmp(HybridBlock):
    """
    First LSTM amplifier branch: a two-layer perceptron with a 4x squeeze.
    Parameters:
    ----------
    in_units : int
        Number of input channels.
    units : int
        Number of output channels.
    """
    def __init__(self,
                 in_units,
                 units,
                 **kwargs):
        super(FirstLSTMAmp, self).__init__(**kwargs)
        # Squeeze the input by a factor of 4 before expanding to the gate width.
        mid_units = in_units // 4
        with self.name_scope():
            self.fc1 = nn.Dense(
                units=mid_units,
                in_units=in_units)
            self.activ = nn.Activation("relu")
            self.fc2 = nn.Dense(
                units=units,
                in_units=mid_units)
    def hybrid_forward(self, F, x):
        # FC (squeeze) -> ReLU -> FC (expand).
        x = self.fc1(x)
        x = self.activ(x)
        x = self.fc2(x)
        return x
class DIALSTMCell(HybridBlock):
    """
    DIA-LSTM cell: a stacked LSTM whose input/hidden projections ("amplifiers")
    produce all four gates in one shot.
    Parameters:
    ----------
    in_x_features : int
        Number of x input channels.
    in_h_features : int
        Number of h input channels.
    num_layers : int
        Number of amplifiers (stacked LSTM layers).
    dropout_rate : float, default 0.1
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_x_features,
                 in_h_features,
                 num_layers,
                 dropout_rate=0.1,
                 **kwargs):
        super(DIALSTMCell, self).__init__(**kwargs)
        self.num_layers = num_layers
        # One chunk per LSTM gate (input, forget, cell, output).
        out_features = 4 * in_h_features
        with self.name_scope():
            self.x_amps = nn.HybridSequential(prefix="")
            self.h_amps = nn.HybridSequential(prefix="")
            for i in range(num_layers):
                # Only the first layer uses the squeezed (bottleneck) amplifier.
                amp_class = FirstLSTMAmp if i == 0 else nn.Dense
                self.x_amps.add(amp_class(
                    in_units=in_x_features,
                    units=out_features))
                self.h_amps.add(amp_class(
                    in_units=in_h_features,
                    units=out_features))
                # Subsequent layers consume the previous hidden state as input.
                in_x_features = in_h_features
            self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x, h, c):
        # h, c: per-layer lists of hidden/cell states; returns the updated lists.
        hy = []
        cy = []
        for i in range(self.num_layers):
            hx_i = h[i]
            cx_i = c[i]
            gates = self.x_amps[i](x) + self.h_amps[i](hx_i)
            i_gate, f_gate, c_gate, o_gate = F.split(gates, axis=1, num_outputs=4)
            i_gate = F.sigmoid(i_gate)
            f_gate = F.sigmoid(f_gate)
            c_gate = F.tanh(c_gate)
            o_gate = F.sigmoid(o_gate)
            cy_i = (f_gate * cx_i) + (i_gate * c_gate)
            # NOTE(review): sigmoid (not the conventional tanh) is applied to the
            # new cell state before gating -- verify against the DIANet paper.
            hy_i = o_gate * F.sigmoid(cy_i)
            cy.append(cy_i)
            hy.append(hy_i)
            # The (dropped-out) hidden state feeds the next stacked layer.
            x = self.dropout(hy_i)
        return hy, cy
class DIAAttention(HybridBlock):
    """
    DIA-Net attention module: channel re-weighting driven by a recurrent
    DIA-LSTM cell whose state persists across calls via `hc`.
    Parameters:
    ----------
    in_x_features : int
        Number of x input channels.
    in_h_features : int
        Number of h input channels.
    num_layers : int, default 1
        Number of amplifiers.
    """
    def __init__(self,
                 in_x_features,
                 in_h_features,
                 num_layers=1,
                 **kwargs):
        super(DIAAttention, self).__init__(**kwargs)
        self.num_layers = num_layers
        with self.name_scope():
            self.lstm = DIALSTMCell(
                in_x_features=in_x_features,
                in_h_features=in_h_features,
                num_layers=num_layers)
    def hybrid_forward(self, F, x, hc=None):
        # Squeeze the feature map to a per-channel descriptor (global avg pool).
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        w = w.flatten()
        if hc is None:
            # First call: start the recurrence from zero hidden/cell states.
            h = [F.zeros_like(w)] * self.num_layers
            c = [F.zeros_like(w)] * self.num_layers
        else:
            h, c = hc
        h, c = self.lstm(w, h, c)
        # Re-weight the channels with the last layer's hidden state.
        w = h[self.num_layers - 1].expand_dims(axis=-1).expand_dims(axis=-1)
        x = F.broadcast_mul(x, w)
        return x, (h, c)
class DIAResUnit(HybridBlock):
    """
    DIA-ResNet unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    attention : HybridBlock, default None
        Attention module (typically shared across all units of a stage).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 bn_use_global_stats=False,
                 bottleneck=True,
                 conv1_stride=False,
                 attention=None,
                 **kwargs):
        super(DIAResUnit, self).__init__(**kwargs)
        # A 1x1 projection shortcut is needed when the identity changes shape.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=padding,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=conv1_stride)
            else:
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
            self.attention = attention
    def hybrid_forward(self, F, x, hc=None):
        # hc carries the recurrent (hidden, cell) state of the stage's DIA-LSTM.
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Attention is applied to the residual branch before the addition.
        x, hc = self.attention(x, hc)
        x = x + identity
        x = self.activ(x)
        return x, hc
class DIAResNet(HybridBlock):
    """
    DIA-ResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DIAResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                # return_two=False: the stage threads (x, hc) between its units
                # internally but exposes only the feature tensor x -- presumably;
                # confirm against DualPathSequential in .common.
                stage = DualPathSequential(
                    return_two=False,
                    prefix="stage{}_".format(i + 1))
                # A single attention module is created per stage and shared by
                # every unit of that stage.
                attention = DIAAttention(
                    in_x_features=channels_per_stage[0],
                    in_h_features=channels_per_stage[0])
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every non-first stage.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(DIAResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride,
                            attention=attention))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pooling acts as global pooling for 224x224 inputs.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_diaresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Build a DIA-ResNet of the requested nominal depth.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. When None, it is
        inferred from the depth (bottlenecks for 50 layers and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Per-stage unit counts for depths that do not depend on the bottleneck flag.
    plain_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in plain_layers:
        layers = plain_layers[blocks]
    else:
        raise ValueError("Unsupported DIA-ResNet with number of blocks: {}".format(blocks))
    # Each unit contributes 3 conv layers (bottleneck) or 2 (simple block);
    # the init block and classifier account for the remaining 2.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units expand the stage output width fourfold.
        channels_per_layers = [ci * 4 for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        scaled_channels = []
        for i, stage_channels in enumerate(channels):
            scaled_stage = []
            for j, cij in enumerate(stage_channels):
                # The very last unit keeps its width so the classifier input
                # size stays stable across scales.
                last_unit = (i == len(channels) - 1) and (j == len(stage_channels) - 1)
                scaled_stage.append(cij if last_unit else int(cij * width_scale))
            scaled_channels.append(scaled_stage)
        channels = scaled_channels
        init_block_channels = int(init_block_channels * width_scale)
    net = DIAResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_path, ctx=ctx)
    return net
def diaresnet10(**kwargs):
    """
    DIA-ResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=10, model_name="diaresnet10", **kwargs)
def diaresnet12(**kwargs):
    """
    DIA-ResNet-12 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=12, model_name="diaresnet12", **kwargs)
def diaresnet14(**kwargs):
    """
    DIA-ResNet-14 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=14, model_name="diaresnet14", **kwargs)
def diaresnetbc14b(**kwargs):
    """
    DIA-ResNet-BC-14b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="diaresnetbc14b", **kwargs)
def diaresnet16(**kwargs):
    """
    DIA-ResNet-16 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=16, model_name="diaresnet16", **kwargs)
def diaresnet18(**kwargs):
    """
    DIA-ResNet-18 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=18, model_name="diaresnet18", **kwargs)
def diaresnet26(**kwargs):
    """
    DIA-ResNet-26 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=26, bottleneck=False, model_name="diaresnet26", **kwargs)
def diaresnetbc26b(**kwargs):
    """
    DIA-ResNet-BC-26b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="diaresnetbc26b", **kwargs)
def diaresnet34(**kwargs):
    """
    DIA-ResNet-34 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=34, model_name="diaresnet34", **kwargs)
def diaresnetbc38b(**kwargs):
    """
    DIA-ResNet-BC-38b model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="diaresnetbc38b", **kwargs)
def diaresnet50(**kwargs):
    """
    DIA-ResNet-50 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=50, model_name="diaresnet50", **kwargs)
def diaresnet50b(**kwargs):
    """
    DIA-ResNet-50 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=50, conv1_stride=False, model_name="diaresnet50b", **kwargs)
def diaresnet101(**kwargs):
    """
    DIA-ResNet-101 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=101, model_name="diaresnet101", **kwargs)
def diaresnet101b(**kwargs):
    """
    DIA-ResNet-101 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=101, conv1_stride=False, model_name="diaresnet101b", **kwargs)
def diaresnet152(**kwargs):
    """
    DIA-ResNet-152 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=152, model_name="diaresnet152", **kwargs)
def diaresnet152b(**kwargs):
    """
    DIA-ResNet-152 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=152, conv1_stride=False, model_name="diaresnet152b", **kwargs)
def diaresnet200(**kwargs):
    """
    DIA-ResNet-200 model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=200, model_name="diaresnet200", **kwargs)
def diaresnet200b(**kwargs):
    """
    DIA-ResNet-200 model with stride at the second convolution in bottleneck block from 'DIANet: Dense-and-Implicit
    Attention Network,' https://arxiv.org/abs/1905.10671.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        Resulting model.
    """
    return get_diaresnet(blocks=200, conv1_stride=False, model_name="diaresnet200b", **kwargs)
def _test():
    """Smoke-test every DIA-ResNet variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter count for each depth variant.
    expected_weight_counts = {
        diaresnet10: 6297352,
        diaresnet12: 6371336,
        diaresnet14: 6666760,
        diaresnetbc14b: 24023976,
        diaresnet16: 7847432,
        diaresnet18: 12568072,
        diaresnet26: 18838792,
        diaresnetbc26b: 29954216,
        diaresnet34: 22676232,
        diaresnetbc38b: 35884456,
        diaresnet50: 39516072,
        diaresnet50b: 39516072,
        diaresnet101: 58508200,
        diaresnet101b: 58508200,
        diaresnet152: 74151848,
        diaresnet152b: 74151848,
        diaresnet200: 78632872,
        diaresnet200b: 78632872,
    }
    ctx = mx.cpu()
    for model, expected_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_count
        # Forward pass with a batch of 14 images at 224x224.
        y = net(mx.nd.zeros((14, 3, 224, 224), ctx=ctx))
        assert (y.shape == (14, 1000))
if __name__ == "__main__":
    _test()
| 27,098 | 33.565051 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/lffd.py | """
LFFD for face detection, implemented in Gluon.
Original paper: 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
"""
__all__ = ['LFFD', 'lffd20x5s320v2_widerface', 'lffd25x8s560v1_widerface']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv3x3, conv1x1_block, conv3x3_block, MultiOutputSequential, ParallelConcurent
from .resnet import ResUnit
from .preresnet import PreResUnit
class LffdDetectionBranch(HybridBlock):
    """
    LFFD specific detection branch: two stacked 1x1 convolutions, the second
    emitting raw (non-activated) predictions.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias,
                 use_bn,
                 **kwargs):
        super(LffdDetectionBranch, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=in_channels,
                use_bias=use_bias,
                use_bn=use_bn)
            # Final projection: no activation, raw prediction maps are emitted.
            self.conv2 = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class LffdDetectionBlock(HybridBlock):
    """
    LFFD specific detection block (one detection head per scale).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    use_bias : bool
        Whether the layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layer.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 use_bias,
                 use_bn,
                 **kwargs):
        super(LffdDetectionBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=use_bias,
                use_bn=use_bn)
            # Two parallel branches concatenated channel-wise (axis=1), so the
            # head outputs 4 + 2 = 6 channels per spatial location.
            # NOTE(review): presumably the 4-channel branch regresses bbox
            # coordinates and the 2-channel branch scores face/background --
            # confirm against the LFFD paper.
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(LffdDetectionBranch(
                in_channels=mid_channels,
                out_channels=4,
                use_bias=use_bias,
                use_bn=use_bn))
            self.branches.add(LffdDetectionBranch(
                in_channels=mid_channels,
                out_channels=2,
                use_bias=use_bias,
                use_bn=use_bn))
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.branches(x)
        return x
class LFFD(HybridBlock):
    """
    LFFD model from 'LFFD: A Light and Fast Face Detector for Edge Devices,' https://arxiv.org/abs/1904.10633.
    Parameters:
    ----------
    enc_channels : list of int
        Number of output channels for each encoder stage.
    dec_channels : int
        Number of output channels for each decoder stage.
    init_block_channels : int
        Number of output channels for the initial encoder unit.
    layers : list of int
        Number of units in each encoder stage.
    int_bends : list of int
        Number of internal bends for each encoder stage.
    use_preresnet : bool
        Whether to use PreResnet backbone instead of ResNet.
    receptive_field_center_starts : list of int
        The start location of the first receptive field of each scale.
    receptive_field_strides : list of int
        Receptive field stride for each scale.
    bbox_factors : list of float
        A half of bbox upper bound for each scale.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 640)
        Spatial size of the expected input image.
    """
    def __init__(self,
                 enc_channels,
                 dec_channels,
                 init_block_channels,
                 layers,
                 int_bends,
                 use_preresnet,
                 receptive_field_center_starts,
                 receptive_field_strides,
                 bbox_factors,
                 in_channels=3,
                 in_size=(480, 640),
                 **kwargs):
        super(LFFD, self).__init__(**kwargs)
        self.in_size = in_size
        # Per-scale geometry, kept as plain attributes for post-processing code.
        self.receptive_field_center_starts = receptive_field_center_starts
        self.receptive_field_strides = receptive_field_strides
        self.bbox_factors = bbox_factors
        unit_class = PreResUnit if use_preresnet else ResUnit
        # The whole network uses biased convolutions and no batch normalization.
        use_bias = True
        use_bn = False
        with self.name_scope():
            # The encoder emits one feature map per scale; units flagged with
            # do_output are the tap points collected by MultiOutputSequential.
            self.encoder = MultiOutputSequential(return_last=False)
            self.encoder.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                padding=0,
                use_bias=use_bias,
                use_bn=use_bn))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(enc_channels):
                layers_per_stage = layers[i]
                int_bends_per_stage = int_bends[i]
                stage = MultiOutputSequential(prefix="stage{}_".format(i + 1), multi_output=False, dual_output=True)
                # Each stage starts with an unpadded stride-2 conv (downsampling).
                stage.add(conv3x3(
                    in_channels=in_channels,
                    out_channels=channels_per_stage,
                    strides=2,
                    padding=0,
                    use_bias=use_bias))
                for j in range(layers_per_stage):
                    unit = unit_class(
                        in_channels=channels_per_stage,
                        out_channels=channels_per_stage,
                        strides=1,
                        use_bias=use_bias,
                        use_bn=use_bn,
                        bottleneck=False)
                    if layers_per_stage - j <= int_bends_per_stage:
                        # The last `int_bends_per_stage` units also feed a head.
                        unit.do_output = True
                    stage.add(unit)
                final_activ = nn.Activation("relu")
                final_activ.do_output = True
                stage.add(final_activ)
                stage.do_output2 = True
                in_channels = channels_per_stage
                self.encoder.add(stage)
            # One detection head per tapped encoder output, applied in parallel.
            self.decoder = ParallelConcurent()
            k = 0
            for i, channels_per_stage in enumerate(enc_channels):
                layers_per_stage = layers[i]
                int_bends_per_stage = int_bends[i]
                for j in range(layers_per_stage):
                    if layers_per_stage - j <= int_bends_per_stage:
                        self.decoder.add(LffdDetectionBlock(
                            in_channels=channels_per_stage,
                            mid_channels=dec_channels,
                            use_bias=use_bias,
                            use_bn=use_bn))
                        k += 1
                self.decoder.add(LffdDetectionBlock(
                    in_channels=channels_per_stage,
                    mid_channels=dec_channels,
                    use_bias=use_bias,
                    use_bn=use_bn))
                # NOTE(review): k only counts the created heads and is never read.
                k += 1
    def hybrid_forward(self, F, x):
        x = self.encoder(x)
        x = self.decoder(x)
        return x
def get_lffd(blocks,
             use_preresnet,
             model_name=None,
             pretrained=False,
             ctx=cpu(),
             root=os.path.join("~", ".mxnet", "models"),
             **kwargs):
    """
    Build an LFFD face detector of the requested depth.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_preresnet : bool
        Whether to use PreResnet backbone instead of ResNet.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    LFFD
        Resulting model.
    """
    # Architecture presets keyed by nominal depth.
    presets = {
        20: {
            "layers": [3, 1, 1, 1, 1],
            "enc_channels": [64, 64, 64, 128, 128],
            "int_bends": [0, 0, 0, 0, 0],
            "receptive_field_center_starts": [3, 7, 15, 31, 63],
            "receptive_field_strides": [4, 8, 16, 32, 64],
            "bbox_factors": [10.0, 20.0, 40.0, 80.0, 160.0],
        },
        25: {
            "layers": [4, 2, 1, 3],
            "enc_channels": [64, 64, 128, 128],
            "int_bends": [1, 1, 0, 2],
            "receptive_field_center_starts": [3, 3, 7, 7, 15, 31, 31, 31],
            "receptive_field_strides": [4, 4, 8, 8, 16, 32, 32, 32],
            "bbox_factors": [7.5, 10.0, 20.0, 35.0, 55.0, 125.0, 200.0, 280.0],
        },
    }
    if blocks not in presets:
        raise ValueError("Unsupported LFFD with number of blocks: {}".format(blocks))
    preset = presets[blocks]
    net = LFFD(
        enc_channels=preset["enc_channels"],
        dec_channels=128,
        init_block_channels=64,
        layers=preset["layers"],
        int_bends=preset["int_bends"],
        use_preresnet=use_preresnet,
        receptive_field_center_starts=preset["receptive_field_center_starts"],
        receptive_field_strides=preset["receptive_field_strides"],
        bbox_factors=preset["bbox_factors"],
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_path, ctx=ctx)
    return net
def lffd20x5s320v2_widerface(**kwargs):
    """
    LFFD-320-20L-5S-V2 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,'
    https://arxiv.org/abs/1904.10633.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed LFFD network (5 detection scales).
    """
    return get_lffd(blocks=20, use_preresnet=True, model_name="lffd20x5s320v2_widerface", **kwargs)
def lffd25x8s560v1_widerface(**kwargs):
    """
    LFFD-560-25L-8S-V1 model for WIDER FACE from 'LFFD: A Light and Fast Face Detector for Edge Devices,'
    https://arxiv.org/abs/1904.10633.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed LFFD network (8 detection scales).
    """
    return get_lffd(blocks=25, use_preresnet=False, model_name="lffd25x8s560v1_widerface", **kwargs)
def _test():
    """Smoke-test: build each LFFD variant, check its parameter count and number of detection outputs."""
    import numpy as np
    import mxnet as mx
    in_size = (480, 640)
    pretrained = False
    models = [
        (lffd20x5s320v2_widerface, 5),
        (lffd25x8s560v1_widerface, 8),
    ]
    for model, num_outs in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            # Skip auxiliary/non-trainable parameters when counting weights.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != lffd20x5s320v2_widerface or weight_count == 1520606)
        assert (model != lffd25x8s560v1_widerface or weight_count == 2290608)
        batch = 14
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        # One output group per detection scale.
        assert (len(y) == num_outs)
if __name__ == "__main__":
    _test()
| 12,410 | 33.28453 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/sepreresnet.py | """
SE-PreResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b', 'SEPreResUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, SEBlock
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
class SEPreResUnit(HybridBlock):
    """
    SE-PreResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 conv1_stride,
                 **kwargs):
        super(SEPreResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the residual branch changes
        # the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = PreResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=conv1_stride)
            else:
                self.body = PreResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            self.se = SEBlock(channels=out_channels)
            if self.resize_identity:
                self.identity_conv = conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides)
    def hybrid_forward(self, F, x):
        identity = x
        # The pre-activation body also returns its pre-activated input, which
        # is reused below for the projection shortcut (PreResNet design).
        x, x_pre_activ = self.body(x)
        # Squeeze-and-excitation channel recalibration before the residual add.
        x = self.se(x)
        if self.resize_identity:
            identity = self.identity_conv(x_pre_activ)
        x = x + identity
        return x
class SEPreResNet(HybridBlock):
    """
    SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SEPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SEPreResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                self.features.add(stage)
            # Final pre-activation (BN + ReLU) before pooling.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 7x7 average pooling acts as global pooling for 224x224 inputs.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_sepreresnet(blocks,
                    bottleneck=None,
                    conv1_stride=True,
                    model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create SE-PreResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. If None, it is inferred from the depth
        (bottleneck blocks for 50 layers and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed SE-PreResNet network.
    """
    if bottleneck is None:
        # ResNet convention: bottleneck blocks for deep variants.
        bottleneck = (blocks >= 50)
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    elif blocks == 269:
        layers = [3, 30, 48, 8]
    else:
        raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
    # Sanity check: stem conv + classifier (2 layers) plus 3 (bottleneck)
    # or 2 (simple block) convolutions per unit must equal the depth.
    if bottleneck:
        assert (sum(layers) * 3 + 2 == blocks)
    else:
        assert (sum(layers) * 2 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = SEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def sepreresnet10(**kwargs):
    """
    SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs)
def sepreresnet12(**kwargs):
    """
    SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs)
def sepreresnet14(**kwargs):
    """
    SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs)
def sepreresnet16(**kwargs):
    """
    SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs)
def sepreresnet18(**kwargs):
    """
    SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)
def sepreresnet26(**kwargs):
    """
    SE-PreResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs)
def sepreresnetbc26b(**kwargs):
    """
    SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs)
def sepreresnet34(**kwargs):
    """
    SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)
def sepreresnetbc38b(**kwargs):
    """
    SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs)
def sepreresnet50(**kwargs):
    """
    SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)
def sepreresnet50b(**kwargs):
    """
    SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)
def sepreresnet101(**kwargs):
    """
    SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)
def sepreresnet101b(**kwargs):
    """
    SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)
def sepreresnet152(**kwargs):
    """
    SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)
def sepreresnet152b(**kwargs):
    """
    SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)
def sepreresnet200(**kwargs):
    """
    SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an
    experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
def sepreresnet200b(**kwargs):
    """
    SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
def _test():
    """Smoke-test: build each SE-PreResNet variant, check parameter counts and the output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        sepreresnet10,
        sepreresnet12,
        sepreresnet14,
        sepreresnet16,
        sepreresnet18,
        sepreresnet26,
        sepreresnetbc26b,
        sepreresnet34,
        sepreresnetbc38b,
        sepreresnet50,
        sepreresnet50b,
        sepreresnet101,
        sepreresnet101b,
        sepreresnet152,
        sepreresnet152b,
        sepreresnet200,
        sepreresnet200b,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            # Skip auxiliary/non-trainable parameters when counting weights.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != sepreresnet10 or weight_count == 5461668)
        assert (model != sepreresnet12 or weight_count == 5536232)
        assert (model != sepreresnet14 or weight_count == 5833840)
        assert (model != sepreresnet16 or weight_count == 7022976)
        assert (model != sepreresnet18 or weight_count == 11776928)
        assert (model != sepreresnet26 or weight_count == 18092188)
        assert (model != sepreresnetbc26b or weight_count == 17388424)
        assert (model != sepreresnet34 or weight_count == 21957204)
        assert (model != sepreresnetbc38b or weight_count == 24019064)
        assert (model != sepreresnet50 or weight_count == 28080472)
        assert (model != sepreresnet50b or weight_count == 28080472)
        assert (model != sepreresnet101 or weight_count == 49319320)
        assert (model != sepreresnet101b or weight_count == 49319320)
        assert (model != sepreresnet152 or weight_count == 66814296)
        assert (model != sepreresnet152b or weight_count == 66814296)
        assert (model != sepreresnet200 or weight_count == 71828312)
        assert (model != sepreresnet200b or weight_count == 71828312)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 21,007 | 34.071786 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnext.py | """
ResNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d',
'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
'ResNeXtBottleneck', 'ResNeXtUnit']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResInitBlock
class ResNeXtBottleneck(HybridBlock):
    """
    ResNeXt bottleneck block for residual path in ResNeXt unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 bottleneck_factor=4,
                 **kwargs):
        super(ResNeXtBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // bottleneck_factor
        # Width rule from the ResNeXt paper: D channels per group, scaled
        # relative to the 64-wide baseline; total width = cardinality * D.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats)
            # Grouped 3x3 convolution carries the stride.
            self.conv2 = conv3x3_block(
                in_channels=group_width,
                out_channels=group_width,
                strides=strides,
                groups=cardinality,
                bn_use_global_stats=bn_use_global_stats)
            # No activation: applied after the residual add in the unit.
            self.conv3 = conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class ResNeXtUnit(HybridBlock):
    """
    ResNeXt unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(ResNeXtUnit, self).__init__(**kwargs)
        # A projection shortcut is needed whenever the residual branch changes
        # the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = ResNeXtBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        # Post-addition activation (original ResNet ordering).
        x = self.activ(x)
        return x
class ResNeXt(HybridBlock):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of each stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ResNeXtUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            cardinality=cardinality,
                            bottleneck_width=bottleneck_width,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pooling acts as global pooling for 224x224 inputs.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resnext(blocks,
                cardinality,
                bottleneck_width,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Build a ResNeXt network of the requested depth, optionally loading pretrained weights.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Depth -> number of units in each of the four stages.
    stage_depths = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in stage_depths:
        raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks))
    layers = stage_depths[blocks]
    # Stem conv + classifier plus 3 convolutions per bottleneck unit.
    assert (sum(layers) * 3 + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[width] * depth for width, depth in zip(channels_per_layers, layers)]
    net = ResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def resnext14_16x4d(**kwargs):
    """
    ResNeXt-14 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=14, cardinality=16, bottleneck_width=4, model_name="resnext14_16x4d", **kwargs)
def resnext14_32x2d(**kwargs):
    """
    ResNeXt-14 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=14, cardinality=32, bottleneck_width=2, model_name="resnext14_32x2d", **kwargs)
def resnext14_32x4d(**kwargs):
    """
    ResNeXt-14 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=14, cardinality=32, bottleneck_width=4, model_name="resnext14_32x4d", **kwargs)
def resnext26_16x4d(**kwargs):
    """
    ResNeXt-26 (16x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=26, cardinality=16, bottleneck_width=4, model_name="resnext26_16x4d", **kwargs)
def resnext26_32x2d(**kwargs):
    """
    ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=26, cardinality=32, bottleneck_width=2, model_name="resnext26_32x2d", **kwargs)
def resnext26_32x4d(**kwargs):
    """
    ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=26, cardinality=32, bottleneck_width=4, model_name="resnext26_32x4d", **kwargs)
def resnext38_32x4d(**kwargs):
    """
    ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=38, cardinality=32, bottleneck_width=4, model_name="resnext38_32x4d", **kwargs)
def resnext50_32x4d(**kwargs):
    """
    ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="resnext50_32x4d", **kwargs)
def resnext101_32x4d(**kwargs):
    """
    ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="resnext101_32x4d", **kwargs)
def resnext101_64x4d(**kwargs):
    """
    ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Returns:
    -------
    HybridBlock
        The constructed network.
    """
    return get_resnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="resnext101_64x4d", **kwargs)
def _test():
    """Smoke-test: build each ResNeXt variant, check parameter counts and the output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        resnext14_16x4d,
        resnext14_32x2d,
        resnext14_32x4d,
        resnext26_16x4d,
        resnext26_32x2d,
        resnext26_32x4d,
        resnext38_32x4d,
        resnext50_32x4d,
        resnext101_32x4d,
        resnext101_64x4d,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            # Skip auxiliary/non-trainable parameters when counting weights.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != resnext14_16x4d or weight_count == 7127336)
        assert (model != resnext14_32x2d or weight_count == 7029416)
        assert (model != resnext14_32x4d or weight_count == 9411880)
        assert (model != resnext26_16x4d or weight_count == 10119976)
        assert (model != resnext26_32x2d or weight_count == 9924136)
        assert (model != resnext26_32x4d or weight_count == 15389480)
        assert (model != resnext38_32x4d or weight_count == 21367080)
        assert (model != resnext50_32x4d or weight_count == 25028904)
        assert (model != resnext101_32x4d or weight_count == 44177704)
        assert (model != resnext101_64x4d or weight_count == 83455272)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 17,156 | 33.245509 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/jasper.py | """
Jasper/DR for ASR, implemented in Gluon.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['Jasper', 'jasper5x3', 'jasper10x4', 'jasper10x5', 'get_jasper', 'MaskConv1d', 'NemoAudioReader',
'NemoMelSpecExtractor', 'CtcDecoder']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import BatchNormExtra, DualPathSequential, DualPathParallelConcurent
def outmask_fill(F, x, x_len, value=0.0):
    """
    Fill the out-of-sequence positions of a tensor with a constant value.
    Parameters:
    ----------
    F : object
        MXNet tensor processing package.
    x : tensor
        Input tensor (time on the last axis).
    x_len : tensor
        Tensor with valid sequence lengths.
    value : float, default 0.0
        Filled value.
    Returns:
    -------
    tensor
        Resulted tensor.
    """
    # F.SequenceMask masks along axis 1, so put the time axis there and
    # restore the original layout afterwards.
    x_t = x.swapaxes(1, 2)
    masked = F.SequenceMask(
        data=x_t,
        sequence_length=x_len,
        use_sequence_length=True,
        value=value,
        axis=1)
    return masked.swapaxes(1, 2)
def masked_normalize2(F, x, x_len):
    """
    Normalize a tensor to zero mean / unit variance per channel, using only
    the valid (unmasked) time steps (scheme #2).
    Parameters:
    ----------
    F : object
        MXNet tensor processing package.
    x : tensor
        Input tensor.
    x_len : tensor
        Tensor with valid sequence lengths.
    Returns:
    -------
    tensor
        Resulted tensor.
    """
    # Zero the padded tail so it does not contribute to the sums below.
    x = outmask_fill(F, x, x_len)
    valid_count = x_len.astype("float32").expand_dims(axis=1)
    mean = x.sum(axis=2) / valid_count
    sqr_mean = x.square().sum(axis=2) / valid_count
    # Var(x) = E[x^2] - E[x]^2.
    std = (sqr_mean - mean.square()).sqrt()
    return (x - mean.expand_dims(axis=2)) / std.expand_dims(axis=2)
class NemoAudioReader(object):
    """
    Audio Reader from NVIDIA NEMO toolkit.
    Parameters:
    ----------
    desired_audio_sample_rate : int, default 16000
        Desired audio sample rate. Files read at a different rate are
        resampled to this rate.
    """
    def __init__(self, desired_audio_sample_rate=16000):
        super(NemoAudioReader, self).__init__()
        self.desired_audio_sample_rate = desired_audio_sample_rate
    def read_from_file(self, audio_file_path):
        """
        Read audio from file.
        Parameters:
        ----------
        audio_file_path : str
            Path to audio file.
        Returns:
        -------
        np.array
            Audio data (float32).
        """
        from soundfile import SoundFile
        with SoundFile(audio_file_path, "r") as data:
            sample_rate = data.samplerate
            audio_data = data.read(dtype="float32")
        # soundfile returns (frames, channels) for multi-channel audio;
        # the transpose yields (channels, frames).
        audio_data = audio_data.transpose()
        if sample_rate != self.desired_audio_sample_rate:
            from librosa.core import resample as lr_resample
            audio_data = lr_resample(y=audio_data, orig_sr=sample_rate, target_sr=self.desired_audio_sample_rate)
        if audio_data.ndim >= 2:
            # NOTE(review): after the transpose the layout is (channels, frames),
            # so axis=1 averages over time, not over channels -- confirm this is
            # the intended mono downmix.
            audio_data = np.mean(audio_data, axis=1)
        return audio_data
    def read_from_files(self, audio_file_paths):
        """
        Read audios from files.
        Parameters:
        ----------
        audio_file_paths : list of str
            Paths to audio files.
        Returns:
        -------
        list of np.array
            Audio data.
        """
        assert (type(audio_file_paths) in (list, tuple))
        audio_data_list = []
        for audio_file_path in audio_file_paths:
            audio_data = self.read_from_file(audio_file_path)
            audio_data_list.append(audio_data)
        return audio_data_list
class NemoMelSpecExtractor(HybridBlock):
    """
    Mel-Spectrogram Extractor from NVIDIA NEMO toolkit.
    Converts raw audio (batch, samples) into log-Mel features
    (batch, n_filters, time), normalized per feature over the valid frames.
    NOTE: this block uses `x.shape` and NumPy round-trips (see
    `calc_real_stft`), so it only works in imperative (non-hybridized) mode.
    Parameters:
    ----------
    sample_rate : int, default 16000
        Sample rate of the input audio data.
    window_size_sec : float, default 0.02
        Size of window for FFT in seconds.
    window_stride_sec : float, default 0.01
        Stride of window for FFT in seconds.
    n_fft : int, default 512
        Length of FT window.
    n_filters : int, default 64
        Number of Mel spectrogram freq bins.
    preemph : float, default 0.97
        Amount of pre emphasis to add to audio.
    dither : float, default 1.0e-05
        Amount of white-noise dithering.
    """
    def __init__(self,
                 sample_rate=16000,
                 window_size_sec=0.02,
                 window_stride_sec=0.01,
                 n_fft=512,
                 n_filters=64,
                 preemph=0.97,
                 dither=1.0e-5,
                 **kwargs):
        super(NemoMelSpecExtractor, self).__init__(**kwargs)
        # Guard against log(0) when a Mel bin is exactly zero.
        self.log_zero_guard_value = 2 ** -24
        win_length = int(window_size_sec * sample_rate)
        self.hop_length = int(window_stride_sec * sample_rate)
        self.n_filters = n_filters
        # Bug fix: `scipy.signal.hann` was removed in SciPy 1.13; the window
        # functions live in `scipy.signal.windows` (available since SciPy 1.1).
        from scipy.signal import windows as scipy_windows
        from librosa import stft as librosa_stft
        window_arr = scipy_windows.hann(win_length, sym=True)
        self.stft = lambda x: librosa_stft(
            x,
            n_fft=n_fft,
            hop_length=self.hop_length,
            win_length=win_length,
            window=window_arr,
            center=True)
        self.dither = dither
        self.preemph = preemph
        # Output time axis is zero-padded up to a multiple of this value.
        self.pad_align = 16
        from librosa.filters import mel as librosa_mel
        fb_arr = librosa_mel(
            sr=sample_rate,
            n_fft=n_fft,
            n_mels=n_filters,
            fmin=0.0,
            fmax=(sample_rate / 2.0))
        fb_arr = np.expand_dims(fb_arr, axis=0)
        with self.name_scope():
            # Window and Mel filterbank are frozen (non-differentiable) constants.
            self.window = self.params.get(
                "window",
                grad_req="null",
                shape=window_arr.shape,
                init=mx.init.Constant(window_arr),
                allow_deferred_init=False,
                differentiable=False)
            self.fb = self.params.get(
                "fb",
                grad_req="null",
                shape=fb_arr.shape,
                init=mx.init.Constant(fb_arr),
                allow_deferred_init=False,
                differentiable=False)
    def hybrid_forward(self, F, x, x_len, window=None, fb=None):
        # Number of STFT frames per utterance (center=True keeps ceil(len/hop)).
        x_len = ((x_len.astype("float32") / self.hop_length).ceil()).astype("int64")
        if self.dither > 0:
            # White-noise dithering against numerical artifacts on silence.
            x += self.dither * F.random.normal_like(x)
        # Pre-emphasis filter: y[t] = x[t] - preemph * x[t-1].
        x = F.concat(x[:, :1], x[:, 1:] - self.preemph * x[:, :-1], dim=1)
        x = self.calc_real_stft(F, x)
        # Power spectrum, then Mel projection and log compression.
        x = F.power(x, 2)
        x = F.batch_dot(fb.repeat(repeats=x.shape[0], axis=0), x)
        x = F.log(x + self.log_zero_guard_value)
        # Per-feature normalization over valid frames, then mask the padding.
        x = masked_normalize2(F, x, x_len)
        x = outmask_fill(F, x, x_len)
        # Pad the time axis to a multiple of `pad_align`.
        x_len_max = x.shape[-1]
        pad_rem = x_len_max % self.pad_align
        if pad_rem != 0:
            x = x.expand_dims(1).pad(mode="constant", pad_width=(0, 0, 0, 0, 0, 0, 0, self.pad_align - pad_rem),
                                     constant_value=0).squeeze(1)
        return x, x_len
    def calc_real_stft(self, F, x):
        """
        Magnitude STFT via librosa; round-trips through NumPy, which breaks
        hybridization but matches the NEMO reference implementation.
        """
        x_np = x.asnumpy()
        ys = []
        for xi_np in x_np:
            ys.append(self.stft(xi_np))
        x_np = np.array(ys)
        x_np = np.abs(x_np)
        x = F.array(x_np, ctx=x.context)
        return x
    def calc_flops(self, x):
        # Rough FLOP accounting used by external profiling tools.
        assert (x.shape[0] == 1)
        num_flops = x[0].size
        num_macs = 0
        return num_flops, num_macs
class CtcDecoder(object):
    """
    Greedy CTC decoder: collapses repeated labels, drops blanks, and maps the
    surviving label ids onto vocabulary characters.
    Parameters:
    ----------
    vocabulary : list of str
        Vocabulary of the dataset.
    """
    def __init__(self,
                 vocabulary):
        super().__init__()
        # The blank symbol is assigned the id just past the vocabulary.
        self.blank_id = len(vocabulary)
        self.labels_map = {i: token for i, token in enumerate(vocabulary)}
    def __call__(self,
                 predictions):
        """
        Decode a sequence of labels to words.
        Parameters:
        ----------
        predictions : np.array of int or list of list of int
            Tensor with predicted labels.
        Returns:
        -------
        list of str
            Words.
        """
        hypotheses = []
        for prediction in predictions:
            decoded = []
            prev = self.blank_id
            for label in prediction:
                # Emit only non-blank labels that are not repeats of the
                # immediately preceding emission.
                if label != self.blank_id and (label != prev or prev == self.blank_id):
                    decoded.append(label)
                prev = label
            hypotheses.append("".join(self.labels_map[c] for c in decoded))
        return hypotheses
def conv1d1(in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            **kwargs):
    """
    Convenience wrapper around nn.Conv1D with the kernel size fixed to 1.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv1D(
        in_channels=in_channels,
        channels=out_channels,
        kernel_size=1,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        **kwargs)
class MaskConv1d(nn.Conv1D):
    """
    Masked 1D convolution block. Zeroes the padded tail of each sequence
    before convolving and returns the updated valid lengths alongside the
    convolution output.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 1 int
        Convolution window size.
    strides : int or tuple/list of 1 int
        Strides of the convolution.
    padding : int or tuple/list of 1 int, default 0
        Padding value for convolution layer.
    dilation : int or tuple/list of 1 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_mask : bool, default True
        Whether to use mask.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_mask=True,
                 **kwargs):
        super(MaskConv1d, self).__init__(
            channels=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            dilation=dilation,
            groups=groups,
            use_bias=use_bias,
            in_channels=in_channels,
            **kwargs)
        self.use_mask = use_mask
        if self.use_mask:
            # Keep scalar copies of the geometry parameters for the length
            # update formula in hybrid_forward.
            self.kernel_size = kernel_size[0] if isinstance(kernel_size, (list, tuple)) else kernel_size
            self.strides = strides[0] if isinstance(strides, (list, tuple)) else strides
            self.padding = padding[0] if isinstance(padding, (list, tuple)) else padding
            self.dilation = dilation[0] if isinstance(dilation, (list, tuple)) else dilation
    def hybrid_forward(self, F, x, x_len, weight, bias=None):
        if self.use_mask:
            # Zero out padded time steps so they cannot leak into the output.
            x = outmask_fill(F, x, x_len)
            # Standard convolution output-length formula; floor() truncates
            # the division result on the length tensor.
            x_len = ((x_len + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) / self.strides + 1).floor()
        x = super(MaskConv1d, self).hybrid_forward(F, x, weight=weight, bias=bias)
        return x, x_len
def mask_conv1d1(in_channels,
                 out_channels,
                 strides=1,
                 groups=1,
                 use_bias=False,
                 **kwargs):
    """
    Masked 1D convolution with the kernel size fixed to 1.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return MaskConv1d(
        kernel_size=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        **kwargs)
class MaskConvBlock1d(HybridBlock):
    """
    Masked 1D convolution block with batch normalization, activation, and dropout.
    Stage order: masked conv -> (BN) -> (activation) -> (dropout).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 dropout_rate=0.0,
                 **kwargs):
        super(MaskConvBlock1d, self).__init__(**kwargs)
        # Cache flags for the optional stages.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.use_dropout = (dropout_rate != 0.0)
        with self.name_scope():
            self.conv = MaskConv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias)
            if self.use_bn:
                self.bn = BatchNormExtra(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats,
                    cudnn_off=bn_cudnn_off)
            if self.activate:
                self.activ = activation()
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x, x_len):
        # The masked convolution also updates the valid sequence lengths.
        x, x_len = self.conv(x, x_len)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x, x_len
def mask_conv1d1_block(in_channels,
                       out_channels,
                       strides=1,
                       padding=0,
                       **kwargs):
    """
    Masked 1D convolution block with the kernel size fixed to 1.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int, default 1
        Strides of the convolution.
    padding : int, default 0
        Padding value for convolution layer.
    """
    return MaskConvBlock1d(
        kernel_size=1,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        **kwargs)
class ChannelShuffle1d(HybridBlock):
    """
    1D version of the channel shuffle layer: interleaves channels across
    groups so information can flow between grouped convolutions.
    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups,
                 **kwargs):
        super(ChannelShuffle1d, self).__init__(**kwargs)
        # Channels must split evenly into groups for the shuffle to be defined.
        assert (channels % groups == 0)
        self.groups = groups
    def hybrid_forward(self, F, x):
        # MXNet reshape codes: 0 keeps the batch axis, -4 splits channels into
        # (groups, channels/groups), -2 copies the remaining (time) axis.
        # Swapping the two split axes and merging them back (-3) performs the
        # channel shuffle.
        return x.reshape((0, -4, self.groups, -1, -2)).swapaxes(1, 2).reshape((0, -3, -2))
    def __repr__(self):
        s = "{name}(groups={groups})"
        return s.format(
            name=self.__class__.__name__,
            groups=self.groups)
class DwsConvBlock1d(HybridBlock):
    """
    Depthwise version of the 1D standard convolution block with batch normalization, activation, dropout, and channel
    shuffle. Stage order: depthwise conv -> pointwise conv -> (shuffle) ->
    (BN) -> (activation) -> (dropout).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 dropout_rate=0.0,
                 **kwargs):
        super(DwsConvBlock1d, self).__init__(**kwargs)
        self.activate = (activation is not None)
        self.use_bn = use_bn
        self.use_dropout = (dropout_rate != 0.0)
        # A grouped pointwise convolution requires a channel shuffle afterwards.
        self.use_channel_shuffle = (groups > 1)
        with self.name_scope():
            # Depthwise: one filter per input channel (groups == in_channels).
            self.dw_conv = MaskConv1d(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=in_channels,
                use_bias=use_bias)
            # Pointwise: 1x1 convolution mixing channels.
            self.pw_conv = mask_conv1d1(
                in_channels=in_channels,
                out_channels=out_channels,
                groups=groups,
                use_bias=use_bias)
            if self.use_channel_shuffle:
                self.shuffle = ChannelShuffle1d(
                    channels=out_channels,
                    groups=groups)
            if self.use_bn:
                self.bn = BatchNormExtra(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats,
                    cudnn_off=bn_cudnn_off)
            if self.activate:
                self.activ = activation()
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x, x_len):
        x, x_len = self.dw_conv(x, x_len)
        x, x_len = self.pw_conv(x, x_len)
        if self.use_channel_shuffle:
            x = self.shuffle(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        return x, x_len
class JasperUnit(HybridBlock):
    """
    Jasper unit with residual connection. In dense-residual (DR) mode the unit
    receives the outputs of ALL previous units and sums a projected copy of
    each into its own output.
    Parameters:
    ----------
    in_channels : int or list of int
        Number of input channels. In DR mode this is the list of channel
        counts of all previous unit outputs (the last entry is the direct
        input).
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    repeat : int
        Count of body convolution blocks.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 bn_epsilon,
                 dropout_rate,
                 repeat,
                 use_dw,
                 use_dr,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(JasperUnit, self).__init__(**kwargs)
        self.use_dropout = (dropout_rate != 0.0)
        self.use_dr = use_dr
        block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        with self.name_scope():
            if self.use_dr:
                # One 1x1 projection per previous unit output; all projected
                # residuals are summed in hybrid_forward.
                self.identity_block = DualPathParallelConcurent()
                for i, dense_in_channels_i in enumerate(in_channels):
                    self.identity_block.add(mask_conv1d1_block(
                        in_channels=dense_in_channels_i,
                        out_channels=out_channels,
                        bn_epsilon=bn_epsilon,
                        bn_use_global_stats=bn_use_global_stats,
                        bn_cudnn_off=bn_cudnn_off,
                        dropout_rate=0.0,
                        activation=None))
                # The body consumes only the direct (most recent) input.
                in_channels = in_channels[-1]
            else:
                self.identity_block = mask_conv1d1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    dropout_rate=0.0,
                    activation=None)
            self.body = DualPathSequential()
            for i in range(repeat):
                # The last body block has no activation/dropout; they are
                # applied after the residual addition instead.
                activation = (lambda: nn.Activation("relu")) if i < repeat - 1 else None
                dropout_rate_i = dropout_rate if i < repeat - 1 else 0.0
                self.body.add(block_class(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=1,
                    padding=(kernel_size // 2),
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    dropout_rate=dropout_rate_i,
                    activation=activation))
                in_channels = out_channels
            self.activ = nn.Activation("relu")
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
    def hybrid_forward(self, F, x, x_len):
        if self.use_dr:
            # In DR mode x_len carries a tuple (lengths, previous outputs,
            # previous lengths); unpack it and append the current input.
            x_len, y, y_len = x_len if type(x_len) is tuple else (x_len, None, None)
            y = [x] if y is None else y + [x]
            y_len = [x_len] if y_len is None else y_len + [x_len]
            # Project every previous output and sum the projections.
            identity, _ = self.identity_block(y, y_len)
            identity = F.stack(*identity, axis=1)
            identity = identity.sum(axis=1)
        else:
            identity, _ = self.identity_block(x, x_len)
        x, x_len = self.body(x, x_len)
        x = x + identity
        x = self.activ(x)
        if self.use_dropout:
            x = self.dropout(x)
        if self.use_dr:
            # Re-pack the dense-residual state for the next unit.
            return x, (x_len, y, y_len)
        else:
            return x, x_len
class JasperFinalBlock(HybridBlock):
    """
    Jasper specific final block: a dilated convolution block followed by a
    plain one, consuming the last two entries of the channel/kernel lists.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    channels : list of int
        Number of output channels for each block.
    kernel_sizes : list of int
        Kernel sizes for each block.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    dropout_rates : list of int
        Dropout rates for each block.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 channels,
                 kernel_sizes,
                 bn_epsilon,
                 dropout_rates,
                 use_dw,
                 use_dr,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(JasperFinalBlock, self).__init__(**kwargs)
        self.use_dr = use_dr
        conv1_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
        with self.name_scope():
            # padding = (2*k)//2 - 1 == k - 1, which exactly offsets the
            # dilation-2 receptive-field growth and preserves sequence length.
            self.conv1 = conv1_class(
                in_channels=in_channels,
                out_channels=channels[-2],
                kernel_size=kernel_sizes[-2],
                strides=1,
                padding=(2 * kernel_sizes[-2] // 2 - 1),
                dilation=2,
                bn_epsilon=bn_epsilon,
                dropout_rate=dropout_rates[-2],
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2 = MaskConvBlock1d(
                in_channels=channels[-2],
                out_channels=channels[-1],
                kernel_size=kernel_sizes[-1],
                strides=1,
                padding=(kernel_sizes[-1] // 2),
                bn_epsilon=bn_epsilon,
                dropout_rate=dropout_rates[-1],
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
    def hybrid_forward(self, F, x, x_len):
        if self.use_dr:
            # Unpack the dense-residual state tuple; only lengths are needed.
            x_len = x_len[0]
        x, x_len = self.conv1(x, x_len)
        x, x_len = self.conv2(x, x_len)
        return x, x_len
class Jasper(HybridBlock):
    """
    Jasper/DR/QuartzNet model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.
    Parameters:
    ----------
    channels : list of int
        Number of output channels for each unit and initial/final block.
    kernel_sizes : list of int
        Kernel sizes for each unit and initial/final block.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    dropout_rates : list of float
        Dropout rates for each unit and initial/final block.
    repeat : int
        Count of body convolution blocks.
    use_dw : bool
        Whether to use depthwise block.
    use_dr : bool
        Whether to use dense residual scheme.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    from_audio : bool, default True
        Whether to treat input as audio instead of Mel-specs.
    dither : float, default 0.0
        Amount of white-noise dithering.
    return_text : bool, default False
        Whether to return text instead of logits.
    vocabulary : list of str or None, default None
        Vocabulary of the dataset.
    in_channels : int, default 64
        Number of input channels (audio features).
    classes : int, default 29
        Number of classification classes (number of graphemes).
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 bn_epsilon,
                 dropout_rates,
                 repeat,
                 use_dw,
                 use_dr,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 from_audio=True,
                 dither=0.0,
                 return_text=False,
                 vocabulary=None,
                 in_channels=64,
                 classes=29,
                 **kwargs):
        super(Jasper, self).__init__(**kwargs)
        # NOTE(review): `in_size` holds the audio-feature count, not a spatial
        # size -- kept as-is for interface compatibility.
        self.in_size = in_channels
        self.classes = classes
        self.vocabulary = vocabulary
        self.from_audio = from_audio
        self.return_text = return_text
        with self.name_scope():
            if self.from_audio:
                # Raw waveform -> log-Mel features.
                self.preprocessor = NemoMelSpecExtractor(dither=dither)
            self.features = DualPathSequential()
            init_block_class = DwsConvBlock1d if use_dw else MaskConvBlock1d
            # Initial block halves the time resolution.
            self.features.add(init_block_class(
                in_channels=in_channels,
                out_channels=channels[0],
                kernel_size=kernel_sizes[0],
                strides=2,
                padding=(kernel_sizes[0] // 2),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                dropout_rate=dropout_rates[0]))
            in_channels = channels[0]
            # For the dense-residual scheme, track channel counts of all
            # previous unit outputs.
            in_channels_list = []
            for i, (out_channels, kernel_size, dropout_rate) in\
                    enumerate(zip(channels[1:-2], kernel_sizes[1:-2], dropout_rates[1:-2])):
                in_channels_list += [in_channels]
                self.features.add(JasperUnit(
                    in_channels=(in_channels_list if use_dr else in_channels),
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    bn_epsilon=bn_epsilon,
                    dropout_rate=dropout_rate,
                    repeat=repeat,
                    use_dw=use_dw,
                    use_dr=use_dr,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            self.features.add(JasperFinalBlock(
                in_channels=in_channels,
                channels=channels,
                kernel_sizes=kernel_sizes,
                bn_epsilon=bn_epsilon,
                dropout_rates=dropout_rates,
                use_dw=use_dw,
                use_dr=use_dr,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = channels[-1]
            # Per-frame grapheme logits.
            self.output = conv1d1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True)
            if self.return_text:
                self.ctc_decoder = CtcDecoder(vocabulary=vocabulary)
    def hybrid_forward(self, F, x, x_len=None):
        if x_len is None:
            # Allow passing a single (x, x_len) pair as the only argument.
            assert (type(x) in (list, tuple))
            x, x_len = x
        if self.from_audio:
            x, x_len = self.preprocessor(x, x_len)
        x, x_len = self.features(x, x_len)
        x = self.output(x)
        if self.return_text:
            # Greedy CTC decoding. Bug fix: the original passed PyTorch-style
            # `dim=`/`keepdim=` keyword arguments, which MXNet's
            # `log_softmax`/`argmax` do not accept (`axis=`/`keepdims=`).
            greedy_predictions = x.swapaxes(1, 2).log_softmax(axis=-1).argmax(axis=-1, keepdims=False).asnumpy()
            return self.ctc_decoder(greedy_predictions)
        else:
            return x, x_len
def get_jasper(version,
               use_dw=False,
               use_dr=False,
               bn_epsilon=1e-3,
               vocabulary=None,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create Jasper/DR/QuartzNet model with specific parameters.
    Parameters:
    ----------
    version : tuple of str
        Model type and configuration.
    use_dw : bool, default False
        Whether to use depthwise block.
    use_dr : bool, default False
        Whether to use dense residual scheme.
    bn_epsilon : float, default 1e-3
        Small float added to variance in Batch norm.
    vocabulary : list of str or None, default None
        Vocabulary of the dataset.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # e.g. "10x5" -> 10 blocks, 5 repeats per block.
    blocks, repeat = tuple(map(int, version[1].split("x")))
    main_stage_repeat = blocks // 5
    model_type = version[0]
    if model_type == "jasper":
        channels_per_stage = [256, 256, 384, 512, 640, 768, 896, 1024]
        kernel_sizes_per_stage = [11, 11, 13, 17, 21, 25, 29, 1]
        dropout_rates_per_stage = [0.2, 0.2, 0.2, 0.2, 0.3, 0.3, 0.4, 0.4]
    elif model_type == "quartznet":
        channels_per_stage = [256, 256, 256, 512, 512, 512, 512, 1024]
        kernel_sizes_per_stage = [33, 33, 39, 51, 63, 75, 87, 1]
        dropout_rates_per_stage = [0.0] * 8
    else:
        raise ValueError("Unsupported Jasper family model type: {}".format(model_type))
    # Stages 1..5 repeat `main_stage_repeat` times; the initial block and the
    # two final blocks occur exactly once.
    stage_repeat = [1] + [main_stage_repeat] * 5 + [1, 1]
    channels = [c for (c, r) in zip(channels_per_stage, stage_repeat) for _ in range(r)]
    kernel_sizes = [k for (k, r) in zip(kernel_sizes_per_stage, stage_repeat) for _ in range(r)]
    dropout_rates = [d for (d, r) in zip(dropout_rates_per_stage, stage_repeat) for _ in range(r)]
    net = Jasper(
        channels=channels,
        kernel_sizes=kernel_sizes,
        bn_epsilon=bn_epsilon,
        dropout_rates=dropout_rates,
        repeat=repeat,
        use_dw=use_dw,
        use_dr=use_dr,
        vocabulary=vocabulary,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def jasper5x3(**kwargs):
    """
    Jasper 5x3 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        version=("jasper", "5x3"),
        model_name="jasper5x3",
        **kwargs)
def jasper10x4(**kwargs):
    """
    Jasper 10x4 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        version=("jasper", "10x4"),
        model_name="jasper10x4",
        **kwargs)
def jasper10x5(**kwargs):
    """
    Jasper 10x5 model from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,'
    https://arxiv.org/abs/1904.03288.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_jasper(
        version=("jasper", "10x5"),
        model_name="jasper10x5",
        **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke-test the Jasper factories: parameter counts and output shapes."""
    import mxnet as mx
    pretrained = False
    from_audio = True
    # dither = 0.0
    dither = 1.0e-5
    audio_features = 64
    classes = 29
    models = [
        jasper5x3,
        jasper10x4,
        jasper10x5,
    ]
    for model in models:
        net = model(
            in_channels=audio_features,
            classes=classes,
            from_audio=from_audio,
            dither=dither,
            pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != jasper5x3 or weight_count == 107681053)
        assert (model != jasper10x4 or weight_count == 261393693)
        assert (model != jasper10x5 or weight_count == 322286877)
        batch = 3
        # Raw audio is roughly 640 samples per output frame (hop 160 * stride 2 * pad).
        aud_scale = 640 if from_audio else 1
        seq_len = np.random.randint(150, 250, batch) * aud_scale
        seq_len_max = seq_len.max() + 2
        x_shape = (batch, seq_len_max) if from_audio else (batch, audio_features, seq_len_max)
        x = mx.nd.random.normal(shape=x_shape, ctx=ctx)
        # Bug fix: `np.long` was removed in NumPy 1.24; use the explicit
        # `np.int64` (same dtype the lengths were mapped to before).
        x_len = mx.nd.array(seq_len, ctx=ctx, dtype=np.int64)
        y, y_len = net(x, x_len)
        assert (y.shape[:2] == (batch, net.classes))
        if from_audio:
            assert (y.shape[2] in range(seq_len_max // aud_scale * 2, seq_len_max // aud_scale * 2 + 9))
        else:
            assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
if __name__ == "__main__":
    _test()
| 38,940 | 31.806234 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resneta.py | """
ResNet(A) with average downsampling for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNetA', 'resneta10', 'resnetabc14b', 'resneta18', 'resneta50b', 'resneta101b', 'resneta152b']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block
from .resnet import ResBlock, ResBottleneck
from .senet import SEInitBlock
class ResADownBlock(HybridBlock):
    """
    ResNet(A) downsample block for the identity branch of a residual unit:
    average pooling followed by a 1x1 convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation=1,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ResADownBlock, self).__init__(**kwargs)
        # When dilation is used instead of striding, pooling degenerates to 1x1.
        down_factor = strides if dilation == 1 else 1
        with self.name_scope():
            self.pool = nn.AvgPool2D(
                pool_size=down_factor,
                strides=down_factor,
                ceil_mode=True,
                count_include_pad=False)
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
    def hybrid_forward(self, F, x):
        return self.conv(self.pool(x))
class ResAUnit(HybridBlock):
    """
    ResNet(A) unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for the second convolution layer in bottleneck.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for the second convolution layer in bottleneck.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 padding=1,
                 dilation=1,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 bottleneck=True,
                 conv1_stride=False,
                 **kwargs):
        super(ResAUnit, self).__init__(**kwargs)
        # A projection on the identity branch is needed whenever the unit
        # changes the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    padding=padding,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    conv1_stride=conv1_stride)
            else:
                # The simple (non-bottleneck) block takes no padding/dilation here.
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            if self.resize_identity:
                # ResNet(A): identity branch downsamples via average pooling +
                # 1x1 conv instead of a strided convolution.
                self.identity_block = ResADownBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_block(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        # Post-sum activation, as in the classic ResNet formulation.
        x = self.activ(x)
        return x
class ResNetA(HybridBlock):
    """
    ResNet(A) with average downsampling model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    dilated : bool, default False
        Whether to use dilation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 dilated=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ResNetA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem borrowed from SENet (two 3x3 convs instead of one 7x7).
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        if dilated:
                            # Dilated mode: only the first unit of stage 2 keeps
                            # stride 2; deeper stages replace striding with a
                            # growing dilation to preserve spatial resolution.
                            strides = 2 if ((j == 0) and (i != 0) and (i < 2)) else 1
                            dilation = (2 ** max(0, i - 1 - int(j == 0)))
                        else:
                            # Standard mode: first unit of each non-initial
                            # stage halves the spatial resolution.
                            strides = 2 if (j == 0) and (i != 0) else 1
                            dilation = 1
                        stage.add(ResAUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            padding=dilation,
                            dilation=dilation,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.GlobalAvgPool2D())
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resneta(blocks,
                bottleneck=None,
                conv1_stride=True,
                width_scale=1.0,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create ResNet(A) with average downsampling model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Depth -> per-stage unit counts. For the ambiguous depths (14, 26, 38) the
    # layout additionally depends on whether bottleneck units are used.
    plain_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    special_layers = {
        (14, False): [2, 2, 1, 1],
        (14, True): [1, 1, 1, 1],
        (26, False): [3, 3, 3, 3],
        (26, True): [2, 2, 2, 2],
        (38, True): [3, 3, 3, 3],
    }
    layers = special_layers.get((blocks, bottleneck), plain_layers.get(blocks))
    if layers is None:
        raise ValueError("Unsupported ResNet(A) with number of blocks: {}".format(blocks))
    # Sanity check: a bottleneck unit holds 3 convs, a simple unit holds 2,
    # plus 2 layers for stem and classifier.
    unit_depth = 3 if bottleneck else 2
    assert (sum(layers) * unit_depth + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [4 * ci for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit's width except the very last one, which keeps the
        # original channel count.
        last_i = len(channels) - 1
        channels = [[cij if (i == last_i) and (j == len(ci) - 1) else int(cij * width_scale)
                     for j, cij in enumerate(ci)]
                    for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = ResNetA(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def resneta10(**kwargs):
    """
    Build the 10-layer ResNet(A) (average-downsampling) model from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=10, model_name="resneta10", **kwargs)
    return net
def resnetabc14b(**kwargs):
    """
    Build the experimental ResNet(A)-BC-14b (bottleneck-compressed,
    average-downsampling) model from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetabc14b", **kwargs)
    return net
def resneta18(**kwargs):
    """
    Build the 18-layer ResNet(A) (average-downsampling) model from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=18, model_name="resneta18", **kwargs)
    return net
def resneta50b(**kwargs):
    """
    Build the 50-layer ResNet(A) (average-downsampling) model, variant "b"
    (stride at the second convolution in the bottleneck block), from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=50, conv1_stride=False, model_name="resneta50b", **kwargs)
    return net
def resneta101b(**kwargs):
    """
    Build the 101-layer ResNet(A) (average-downsampling) model, variant "b"
    (stride at the second convolution in the bottleneck block), from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=101, conv1_stride=False, model_name="resneta101b", **kwargs)
    return net
def resneta152b(**kwargs):
    """
    Build the 152-layer ResNet(A) (average-downsampling) model, variant "b"
    (stride at the second convolution in the bottleneck block), from
    'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resneta(blocks=152, conv1_stride=False, model_name="resneta152b", **kwargs)
    return net
def _test():
    """Smoke test: check parameter counts and output shapes of all variants."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Model factory -> expected trainable-parameter count.
    expected_counts = {
        resneta10: 5438024,
        resnetabc14b: 10084168,
        resneta18: 11708744,
        resneta50b: 25576264,
        resneta101b: 44568392,
        resneta152b: 60212040,
    }
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 17,132 | 34.545643 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnesta.py | """
ResNeSt(A) with average downsampling for ImageNet-1K, implemented in Gluon.
Original paper: 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
"""
__all__ = ['ResNeStA', 'resnestabc14', 'resnesta18', 'resnestabc26', 'resnestabc38', 'resnesta50', 'resnesta101',
'resnesta152', 'resnesta200', 'resnesta269', 'ResNeStADownBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, saconv3x3_block
from .senet import SEInitBlock
class ResNeStABlock(HybridBlock):
    """
    Simple ResNeSt(A) block for residual path in ResNeSt(A) unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=False,
                 use_bn=True,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ResNeStABlock, self).__init__(**kwargs)
        # "A" variant: downsampling is done with an average pool between the
        # convolutions rather than with a strided convolution.
        self.resize = (strides > 1)
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            if self.resize:
                self.pool = nn.AvgPool2D(
                    pool_size=3,
                    strides=strides,
                    padding=1)
            # Split-attention 3x3 conv; no activation because the residual sum
            # is activated in the enclosing unit.
            self.conv2 = saconv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        if self.resize:
            x = self.pool(x)
        x = self.conv2(x)
        return x
class ResNeStABottleneck(HybridBlock):
    """
    ResNeSt(A) bottleneck block for residual path in ResNeSt(A) unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 bottleneck_factor=4,
                 **kwargs):
        super(ResNeStABottleneck, self).__init__(**kwargs)
        # "A" variant: downsampling via average pool placed after the
        # split-attention conv, instead of a strided convolution.
        self.resize = (strides > 1)
        # Inner (squeezed) width of the bottleneck.
        mid_channels = out_channels // bottleneck_factor
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Split-attention 3x3 conv (stride-free; see pool below).
            self.conv2 = saconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            if self.resize:
                self.pool = nn.AvgPool2D(
                    pool_size=3,
                    strides=strides,
                    padding=1)
            # Expansion back to out_channels; no activation because the
            # residual sum is activated in the enclosing unit.
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        if self.resize:
            x = self.pool(x)
        x = self.conv3(x)
        return x
class ResNeStADownBlock(HybridBlock):
    """
    ResNeSt(A) downsample block for the identity branch of a residual unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ResNeStADownBlock, self).__init__(**kwargs)
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            self.pool = nn.AvgPool2D(
                pool_size=strides,
                strides=strides,
                ceil_mode=True,
                count_include_pad=False)
            # 1x1 projection matches the channel count; no activation because
            # the residual sum is activated in the enclosing unit.
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
    def hybrid_forward(self, F, x):
        # Average-pool first, then project channels ("average downsampling").
        x = self.pool(x)
        x = self.conv(x)
        return x
class ResNeStAUnit(HybridBlock):
    """
    ResNeSt(A) unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    bottleneck : bool, default True
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 bottleneck=True,
                 **kwargs):
        super(ResNeStAUnit, self).__init__(**kwargs)
        # A projection on the identity branch is needed whenever the unit
        # changes the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            if bottleneck:
                self.body = ResNeStABottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            else:
                self.body = ResNeStABlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            if self.resize_identity:
                # Identity branch downsamples via average pooling + 1x1 conv.
                self.identity_block = ResNeStADownBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_block(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        # Post-sum activation, as in the classic ResNet formulation.
        x = self.activ(x)
        return x
class ResNeStA(HybridBlock):
    """
    ResNeSt(A) with average downsampling model from 'ResNeSt: Split-Attention Networks,'
    https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 dropout_rate=0.0,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ResNeStA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem borrowed from SENet (two 3x3 convs instead of one 7x7).
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # First unit of each non-initial stage halves the
                        # spatial resolution.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ResNeStAUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off,
                            bottleneck=bottleneck))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.GlobalAvgPool2D())
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            # Optional dropout before the classifier (used by the deepest models).
            if dropout_rate > 0.0:
                self.output.add(nn.Dropout(rate=dropout_rate))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resnesta(blocks,
                 bottleneck=None,
                 width_scale=1.0,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", "models"),
                 **kwargs):
    """
    Create ResNeSt(A) with average downsampling model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Depth -> per-stage unit counts. For the ambiguous depths (14, 26, 38) the
    # layout additionally depends on whether bottleneck units are used.
    plain_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
        269: [3, 30, 48, 8],
    }
    special_layers = {
        (14, False): [2, 2, 1, 1],
        (14, True): [1, 1, 1, 1],
        (26, False): [3, 3, 3, 3],
        (26, True): [2, 2, 2, 2],
        (38, True): [3, 3, 3, 3],
    }
    layers = special_layers.get((blocks, bottleneck), plain_layers.get(blocks))
    if layers is None:
        raise ValueError("Unsupported ResNeSt(A) with number of blocks: {}".format(blocks))
    # Sanity check: a bottleneck unit holds 3 convs, a simple unit holds 2,
    # plus 2 layers for stem and classifier.
    unit_depth = 3 if bottleneck else 2
    assert (sum(layers) * unit_depth + 2 == blocks)
    # Deeper models (>= 101 layers) use a wider stem.
    init_block_channels = 128 if blocks >= 101 else 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [4 * ci for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit's width except the very last one, which keeps the
        # original channel count.
        last_i = len(channels) - 1
        channels = [[cij if (i == last_i) and (j == len(ci) - 1) else int(cij * width_scale)
                     for j, cij in enumerate(ci)]
                    for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = ResNeStA(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def resnestabc14(**kwargs):
    """
    Build the ResNeSt(A)-BC-14 (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=14, bottleneck=True, model_name="resnestabc14", **kwargs)
    return net
def resnesta18(**kwargs):
    """
    Build the 18-layer ResNeSt(A) (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=18, model_name="resnesta18", **kwargs)
    return net
def resnestabc26(**kwargs):
    """
    Build the ResNeSt(A)-BC-26 (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=26, bottleneck=True, model_name="resnestabc26", **kwargs)
    return net
def resnestabc38(**kwargs):
    """
    Build the ResNeSt(A)-BC-38 (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=38, bottleneck=True, model_name="resnestabc38", **kwargs)
    return net
def resnesta50(**kwargs):
    """
    Build the 50-layer ResNeSt(A) (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=50, model_name="resnesta50", **kwargs)
    return net
def resnesta101(**kwargs):
    """
    Build the 101-layer ResNeSt(A) (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=101, model_name="resnesta101", **kwargs)
    return net
def resnesta152(**kwargs):
    """
    Build the 152-layer ResNeSt(A) (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=152, model_name="resnesta152", **kwargs)
    return net
def resnesta200(in_size=(256, 256), **kwargs):
    """
    Build the 200-layer ResNeSt(A) (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Uses a larger input size and classifier dropout.
    Parameters:
    ----------
    in_size : tuple of two ints, default (256, 256)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=200, in_size=in_size, dropout_rate=0.2, model_name="resnesta200", **kwargs)
    return net
def resnesta269(in_size=(320, 320), **kwargs):
    """
    Build the 269-layer ResNeSt(A) (average-downsampling) model from
    'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Uses a larger input size and classifier dropout.
    Parameters:
    ----------
    in_size : tuple of two ints, default (320, 320)
        Spatial size of the expected input image.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_resnesta(blocks=269, in_size=in_size, dropout_rate=0.2, model_name="resnesta269", **kwargs)
    return net
def _test():
    """Smoke test: check parameter counts and output shapes of all variants."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # (model factory, input size, expected trainable-parameter count).
    configs = [
        (resnestabc14, 224, 10611688),
        (resnesta18, 224, 12763784),
        (resnestabc26, 224, 17069448),
        (resnestabc38, 224, 23527208),
        (resnesta50, 224, 27483240),
        (resnesta101, 224, 48275016),
        (resnesta152, 224, 65316040),
        (resnesta200, 256, 70201544),
        (resnesta269, 320, 110929480),
    ]
    for model, size, expected in configs:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        batch = 14
        x = mx.nd.zeros((batch, 3, size, size), ctx=ctx)
        y = net(x)
        assert (y.shape == (batch, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 22,512 | 33.849845 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/senet.py | """
SENet for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154', 'SEInitBlock']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, SEBlock
class SENetBottleneck(HybridBlock):
    """
    SENet bottleneck block for residual path in SENet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(SENetBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        # ResNeXt-style grouped width: D channels per group, scaled by
        # bottleneck_width relative to the reference width of 64.
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        # SENet narrows the first 1x1 conv to half the grouped width.
        group_width2 = group_width // 2
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width2,
                bn_use_global_stats=bn_use_global_stats)
            # Grouped 3x3 conv carries the stride (spatial downsampling, if any).
            self.conv2 = conv3x3_block(
                in_channels=group_width2,
                out_channels=group_width,
                strides=strides,
                groups=cardinality,
                bn_use_global_stats=bn_use_global_stats)
            # Expansion back to out_channels; no activation because the
            # residual sum is activated in the enclosing unit.
            self.conv3 = conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class SENetUnit(HybridBlock):
    """
    SENet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    identity_conv3x3 : bool, default False
        Whether to use 3x3 convolution in the identity link.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 identity_conv3x3,
                 **kwargs):
        super(SENetUnit, self).__init__(**kwargs)
        # A projection on the identity branch is needed whenever the unit
        # changes the channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            # NOTE: child creation order inside name_scope() fixes the
            # auto-generated parameter names used by pretrained weight files.
            self.body = SENetBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats)
            # Squeeze-and-excitation: channel-wise recalibration of the
            # residual branch before the sum.
            self.se = SEBlock(channels=out_channels)
            if self.resize_identity:
                # Projection kernel size (3x3 vs 1x1) is chosen by the caller
                # via identity_conv3x3.
                if identity_conv3x3:
                    self.identity_conv = conv3x3_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats,
                        activation=None)
                else:
                    self.identity_conv = conv1x1_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats,
                        activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = self.se(x)
        x = x + identity
        # Post-sum activation, as in the classic ResNet formulation.
        x = self.activ(x)
        return x
class SEInitBlock(HybridBlock):
    """
    SENet specific initial block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(SEInitBlock, self).__init__(**kwargs)
        mid_channels = out_channels // 2
        with self.name_scope():
            # Three stacked 3x3 convolutions (first one strided) replace the usual
            # single 7x7 stem, then a strided max-pool halves the resolution again.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)

    def hybrid_forward(self, F, x):
        for block in (self.conv1, self.conv2, self.conv3, self.pool):
            x = block(x)
        return x
class SENet(HybridBlock):
    """
    SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SENet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            # Feature extractor: init block -> one stage per entry of `channels`
            # -> 7x7 average pooling (matches 224x224 inputs).
            self.features = nn.HybridSequential(prefix="")
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                # All stages except the first use a 3x3 conv in the identity link.
                identity_conv3x3 = (i != 0)
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage but the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SENetUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            cardinality=cardinality,
                            bottleneck_width=bottleneck_width,
                            bn_use_global_stats=bn_use_global_stats,
                            identity_conv3x3=identity_conv3x3))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            # Classifier head with dropout before the final dense layer.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dropout(rate=0.2))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_senet(blocks,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create SENet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Depth -> (per-stage unit counts, cardinality).
    depth_config = {
        16: ([1, 1, 1, 1], 32),
        28: ([2, 2, 2, 2], 32),
        40: ([3, 3, 3, 3], 32),
        52: ([3, 4, 6, 3], 32),
        103: ([3, 4, 23, 3], 32),
        154: ([3, 8, 36, 3], 64),
    }
    if blocks not in depth_config:
        raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks))
    layers, cardinality = depth_config[blocks]

    bottleneck_width = 4
    init_block_channels = 128
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[width] * count for (width, count) in zip(channels_per_layers, layers)]

    net = SENet(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def senet16(**kwargs):
    """
    SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_senet(
        blocks=16,
        model_name="senet16",
        **kwargs)
def senet28(**kwargs):
    """
    SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_senet(
        blocks=28,
        model_name="senet28",
        **kwargs)
def senet40(**kwargs):
    """
    SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_senet(
        blocks=40,
        model_name="senet40",
        **kwargs)
def senet52(**kwargs):
    """
    SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_senet(
        blocks=52,
        model_name="senet52",
        **kwargs)
def senet103(**kwargs):
    """
    SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_senet(
        blocks=103,
        model_name="senet103",
        **kwargs)
def senet154(**kwargs):
    """
    SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_senet(
        blocks=154,
        model_name="senet154",
        **kwargs)
def _test():
    """Smoke-test every SENet variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, expected trainable-parameter count)
    models = [
        (senet16, 31366168),
        (senet28, 36453768),
        (senet40, 41541368),
        (senet52, 44659416),
        (senet103, 60963096),
        (senet154, 115088984),
    ]

    for model, expected_weight_count in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 15,863 | 31.641975 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diapreresnet_cifar.py | """
DIA-PreResNet for CIFAR/SVHN, implemented in Gluon.
Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['CIFARDIAPreResNet', 'diapreresnet20_cifar10', 'diapreresnet20_cifar100', 'diapreresnet20_svhn',
'diapreresnet56_cifar10', 'diapreresnet56_cifar100', 'diapreresnet56_svhn', 'diapreresnet110_cifar10',
'diapreresnet110_cifar100', 'diapreresnet110_svhn', 'diapreresnet164bn_cifar10',
'diapreresnet164bn_cifar100', 'diapreresnet164bn_svhn', 'diapreresnet1001_cifar10',
'diapreresnet1001_cifar100', 'diapreresnet1001_svhn', 'diapreresnet1202_cifar10',
'diapreresnet1202_cifar100', 'diapreresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, DualPathSequential
from .preresnet import PreResActivation
from .diaresnet import DIAAttention
from .diapreresnet import DIAPreResUnit
class CIFARDIAPreResNet(HybridBlock):
    """
    DIA-PreResNet model for CIFAR from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARDIAPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: a bare 3x3 convolution (pre-activation units supply BN/ReLU).
            self.features.add(conv3x3(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                # DualPathSequential(return_two=False) threads a second (attention
                # state) path through the units but exposes only the main output.
                stage = DualPathSequential(
                    return_two=False,
                    prefix="stage{}_".format(i + 1))
                # A single DIA attention module is shared by all units of the stage.
                attention = DIAAttention(
                    in_x_features=channels_per_stage[0],
                    in_h_features=channels_per_stage[0])
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage but the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(DIAPreResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False,
                            attention=attention))
                        in_channels = out_channels
                self.features.add(stage)
            # Final BN + ReLU, required because units are pre-activation.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 8x8 average pooling matches 32x32 inputs after two downsamplings.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_diapreresnet_cifar(classes,
                           blocks,
                           bottleneck,
                           model_name=None,
                           pretrained=False,
                           ctx=cpu(),
                           root=os.path.join("~", ".mxnet", "models"),
                           **kwargs):
    """
    Create DIA-PreResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Bottleneck units contribute 9 counted blocks each; simple units 6.
    divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % divisor == 0)
    layers = [(blocks - 2) // divisor] * 3

    channels_per_layers = [16, 32, 64]
    init_block_channels = 16

    channels = [[width] * count for (width, count) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units quadruple their output width.
        channels = [[4 * width for width in stage] for stage in channels]

    net = CIFARDIAPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def diapreresnet20_cifar10(classes=10, **kwargs):
    """
    DIA-PreResNet-20 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="diapreresnet20_cifar10",
        **kwargs)
def diapreresnet20_cifar100(classes=100, **kwargs):
    """
    DIA-PreResNet-20 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="diapreresnet20_cifar100",
        **kwargs)
def diapreresnet20_svhn(classes=10, **kwargs):
    """
    DIA-PreResNet-20 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="diapreresnet20_svhn",
        **kwargs)
def diapreresnet56_cifar10(classes=10, **kwargs):
    """
    DIA-PreResNet-56 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="diapreresnet56_cifar10",
        **kwargs)
def diapreresnet56_cifar100(classes=100, **kwargs):
    """
    DIA-PreResNet-56 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="diapreresnet56_cifar100",
        **kwargs)
def diapreresnet56_svhn(classes=10, **kwargs):
    """
    DIA-PreResNet-56 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=56,
        bottleneck=False,
        model_name="diapreresnet56_svhn",
        **kwargs)
def diapreresnet110_cifar10(classes=10, **kwargs):
    """
    DIA-PreResNet-110 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="diapreresnet110_cifar10",
        **kwargs)
def diapreresnet110_cifar100(classes=100, **kwargs):
    """
    DIA-PreResNet-110 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="diapreresnet110_cifar100",
        **kwargs)
def diapreresnet110_svhn(classes=10, **kwargs):
    """
    DIA-PreResNet-110 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=110,
        bottleneck=False,
        model_name="diapreresnet110_svhn",
        **kwargs)
def diapreresnet164bn_cifar10(classes=10, **kwargs):
    """
    DIA-PreResNet-164(BN) model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="diapreresnet164bn_cifar10",
        **kwargs)
def diapreresnet164bn_cifar100(classes=100, **kwargs):
    """
    DIA-PreResNet-164(BN) model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="diapreresnet164bn_cifar100",
        **kwargs)
def diapreresnet164bn_svhn(classes=10, **kwargs):
    """
    DIA-PreResNet-164(BN) model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=164,
        bottleneck=True,
        model_name="diapreresnet164bn_svhn",
        **kwargs)
def diapreresnet1001_cifar10(classes=10, **kwargs):
    """
    DIA-PreResNet-1001 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="diapreresnet1001_cifar10",
        **kwargs)
def diapreresnet1001_cifar100(classes=100, **kwargs):
    """
    DIA-PreResNet-1001 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="diapreresnet1001_cifar100",
        **kwargs)
def diapreresnet1001_svhn(classes=10, **kwargs):
    """
    DIA-PreResNet-1001 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=1001,
        bottleneck=True,
        model_name="diapreresnet1001_svhn",
        **kwargs)
def diapreresnet1202_cifar10(classes=10, **kwargs):
    """
    DIA-PreResNet-1202 model for CIFAR-10 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="diapreresnet1202_cifar10",
        **kwargs)
def diapreresnet1202_cifar100(classes=100, **kwargs):
    """
    DIA-PreResNet-1202 model for CIFAR-100 from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="diapreresnet1202_cifar100",
        **kwargs)
def diapreresnet1202_svhn(classes=10, **kwargs):
    """
    DIA-PreResNet-1202 model for SVHN from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_diapreresnet_cifar(
        classes=classes,
        blocks=1202,
        bottleneck=False,
        model_name="diapreresnet1202_svhn",
        **kwargs)
def _test():
    """Smoke-test every CIFAR/SVHN DIA-PreResNet variant: weights and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes, expected trainable-parameter count)
    models = [
        (diapreresnet20_cifar10, 10, 286674),
        (diapreresnet20_cifar100, 100, 292524),
        (diapreresnet20_svhn, 10, 286674),
        (diapreresnet56_cifar10, 10, 869970),
        (diapreresnet56_cifar100, 100, 875820),
        (diapreresnet56_svhn, 10, 869970),
        (diapreresnet110_cifar10, 10, 1744914),
        (diapreresnet110_cifar100, 100, 1750764),
        (diapreresnet110_svhn, 10, 1744914),
        (diapreresnet164bn_cifar10, 10, 1922106),
        (diapreresnet164bn_cifar100, 100, 1945236),
        (diapreresnet164bn_svhn, 10, 1922106),
        (diapreresnet1001_cifar10, 10, 10546554),
        (diapreresnet1001_cifar100, 100, 10569684),
        (diapreresnet1001_svhn, 10, 10546554),
        (diapreresnet1202_cifar10, 10, 19438226),
        (diapreresnet1202_cifar100, 100, 19444076),
        (diapreresnet1202_svhn, 10, 19438226),
    ]

    for model, classes, expected_weight_count in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 22,523 | 36.855462 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/simplepose_coco.py | """
SimplePose for COCO Keypoint, implemented in Gluon.
Original paper: 'Simple Baselines for Human Pose Estimation and Tracking,' https://arxiv.org/abs/1804.06208.
"""
__all__ = ['SimplePose', 'simplepose_resnet18_coco', 'simplepose_resnet50b_coco', 'simplepose_resnet101b_coco',
'simplepose_resnet152b_coco', 'simplepose_resneta50b_coco', 'simplepose_resneta101b_coco',
'simplepose_resneta152b_coco', 'HeatmapMaxDetBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import DeconvBlock, conv1x1, HeatmapMaxDetBlock
from .resnet import resnet18, resnet50b, resnet101b, resnet152b
from .resneta import resneta50b, resneta101b, resneta152b
class SimplePose(HybridBlock):
    """
    SimplePose model from 'Simple Baselines for Human Pose Estimation and Tracking,'
    https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    channels : list of int
        Number of output channels for each decoder unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    return_heatmap : bool, default False
        Whether to return only heatmap.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (256, 192)
        Spatial size of the expected input image.
    keypoints : int, default 17
        Number of keypoints.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels,
                 channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=True,
                 return_heatmap=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(256, 192),
                 keypoints=17,
                 **kwargs):
        super(SimplePose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap

        with self.name_scope():
            self.backbone = backbone

            # Decoder: stride-2 deconvolution blocks that upsample the backbone
            # features, then a biased 1x1 conv emitting one heatmap per keypoint.
            self.decoder = nn.HybridSequential(prefix="")
            in_channels = backbone_out_channels
            for out_channels in channels:
                self.decoder.add(DeconvBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=4,
                    strides=2,
                    padding=1,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            self.decoder.add(conv1x1(
                in_channels=in_channels,
                out_channels=keypoints,
                use_bias=True))

            # Heatmaps come out at 1/4 of the input resolution.
            self.heatmap_max_det = HeatmapMaxDetBlock(
                channels=keypoints,
                in_size=(in_size[0] // 4, in_size[1] // 4),
                fixed_size=fixed_size)

    def hybrid_forward(self, F, x):
        x = self.backbone(x)
        heatmap = self.decoder(x)
        if self.return_heatmap:
            return heatmap
        else:
            # Decode heatmap maxima into keypoint coordinates/scores.
            keypoints = self.heatmap_max_det(heatmap)
            return keypoints
def get_simplepose(backbone,
                   backbone_out_channels,
                   keypoints,
                   bn_cudnn_off,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create SimplePose model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int
        Number of output channels for the backbone.
    keypoints : int
        Number of keypoints.
    bn_cudnn_off : bool
        Whether to disable CUDNN batch normalization operator.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # All variants share the same three-unit, 256-wide decoder.
    net = SimplePose(
        backbone=backbone,
        backbone_out_channels=backbone_out_channels,
        channels=[256, 256, 256],
        bn_cudnn_off=bn_cudnn_off,
        keypoints=keypoints,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def simplepose_resnet18_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet-18 backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation and
    Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resnet18(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=512,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resnet18_coco",
        **kwargs)
def simplepose_resnet50b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet-50b backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation and
    Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resnet50b(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resnet50b_coco",
        **kwargs)
def simplepose_resnet101b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet-101b backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation
    and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resnet101b(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resnet101b_coco",
        **kwargs)
def simplepose_resnet152b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet-152b backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation
    and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resnet152b(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resnet152b_coco",
        **kwargs)
def simplepose_resneta50b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet(A)-50b backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation
    and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resneta50b(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resneta50b_coco",
        **kwargs)
def simplepose_resneta101b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet(A)-101b backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation
    and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resneta101b(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resneta101b_coco",
        **kwargs)
def simplepose_resneta152b_coco(pretrained_backbone=False, keypoints=17, bn_cudnn_off=True, **kwargs):
    """
    SimplePose with a ResNet(A)-152b backbone for COCO Keypoint, from 'Simple Baselines for Human Pose Estimation
    and Tracking,' https://arxiv.org/abs/1804.06208.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights for the feature extractor.
    keypoints : int, default 17
        Number of keypoints.
    bn_cudnn_off : bool, default True
        Whether to disable CUDNN batch normalization operator.
    pretrained : bool, default False
        Whether to load pretrained weights for the whole model.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    # Drop the classifier's final pooling stage to keep the convolutional trunk.
    encoder = resneta152b(pretrained=pretrained_backbone, bn_cudnn_off=bn_cudnn_off).features[:-1]
    return get_simplepose(
        backbone=encoder,
        backbone_out_channels=2048,
        keypoints=keypoints,
        bn_cudnn_off=bn_cudnn_off,
        model_name="simplepose_resneta152b_coco",
        **kwargs)
def _test():
    """Smoke-test all SimplePose variants: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    in_size = (256, 192)
    keypoints = 17
    return_heatmap = True
    pretrained = False
    # Model constructor -> expected number of trainable weights.
    expected_counts = {
        simplepose_resnet18_coco: 15376721,
        simplepose_resnet50b_coco: 33999697,
        simplepose_resnet101b_coco: 52991825,
        simplepose_resnet152b_coco: 68635473,
        simplepose_resneta50b_coco: 34018929,
        simplepose_resneta101b_coco: 53011057,
        simplepose_resneta152b_coco: 68654705,
    }

    for model, expected_count in expected_counts.items():
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
        batch = 14
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        assert ((y.shape[0] == batch) and (y.shape[1] == keypoints))
        if return_heatmap:
            # Heatmaps are predicted at 1/4 of the input resolution.
            assert ((y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4))
        else:
            assert (y.shape[2] == 3)
| 15,538 | 39.571802 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/vovnet.py | """
VoVNet for ImageNet-1K, implemented in Gluon.
Original paper: 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
https://arxiv.org/abs/1904.09730.
"""
__all__ = ['VoVNet', 'vovnet27s', 'vovnet39', 'vovnet57']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, SequentialConcurrent
class VoVUnit(HybridBlock):
    """
    VoVNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    branch_channels : int
        Number of output channels for each branch.
    num_branches : int
        Number of branches.
    resize : bool
        Whether to use resize block.
    use_residual : bool
        Whether to use residual block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 branch_channels,
                 num_branches,
                 resize,
                 use_residual,
                 bn_use_global_stats=False,
                 **kwargs):
        super(VoVUnit, self).__init__(**kwargs)
        self.resize = resize
        self.use_residual = use_residual
        with self.name_scope():
            if self.resize:
                # Spatial downsampling at the start of a stage.
                self.pool = nn.MaxPool2D(
                    pool_size=3,
                    strides=2,
                    ceil_mode=True)
            # One-shot aggregation: a chain of 3x3 conv blocks; SequentialConcurrent
            # concatenates the unit input and every intermediate output on the channel axis.
            self.branches = SequentialConcurrent(prefix="")
            with self.branches.name_scope():
                branch_in_channels = in_channels
                for i in range(num_branches):
                    self.branches.add(conv3x3_block(
                        in_channels=branch_in_channels,
                        out_channels=branch_channels,
                        bn_use_global_stats=bn_use_global_stats))
                    # Each subsequent branch consumes the previous branch's output.
                    branch_in_channels = branch_channels
            # 1x1 conv fusing the concatenated features down to `out_channels`.
            self.concat_conv = conv1x1_block(
                in_channels=(in_channels + num_branches * branch_channels),
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        if self.resize:
            x = self.pool(x)
        if self.use_residual:
            # Residual shortcut is taken after pooling, so shapes match.
            identity = x
        x = self.branches(x)
        x = self.concat_conv(x)
        if self.use_residual:
            x = x + identity
        return x
class VoVInitBlock(HybridBlock):
    """
    VoVNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(VoVInitBlock, self).__init__(**kwargs)
        # Intermediate width of the three-conv stem.
        mid_channels = out_channels // 2
        with self.name_scope():
            # Stem: two stride-2 convs (4x total downsampling) with one stride-1 conv between.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class VoVNet(HybridBlock):
    """
    VoVNet model from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection,'
    https://arxiv.org/abs/1904.09730.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    branch_channels : list of list of int
        Number of branch output channels for each unit.
    num_branches : int
        Number of branches for the each unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 branch_channels,
                 num_branches,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(VoVNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        init_block_channels = 128
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(VoVInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # First unit of a stage has no residual shortcut; it downsamples
                        # (via MaxPool inside VoVUnit) for every stage but the first.
                        use_residual = (j != 0)
                        resize = (j == 0) and (i != 0)
                        stage.add(VoVUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            branch_channels=branch_channels[i][j],
                            num_branches=num_branches,
                            resize=resize,
                            use_residual=use_residual,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Global 7x7 average pooling assumes the default 224x224 input size.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_vovnet(blocks,
               slim=False,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create VoVNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (27, 39 or 57).
    slim : bool, default False
        Whether to use a slim model (half-width channels).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Raises:
    ------
    ValueError
        If `blocks` is not one of the supported depths, or if `pretrained` is
        set without a valid `model_name`.
    """
    if blocks == 27:
        layers = [1, 1, 1, 1]
    elif blocks == 39:
        layers = [1, 1, 2, 2]
    elif blocks == 57:
        layers = [1, 1, 4, 3]
    else:
        raise ValueError("Unsupported VoVNet with number of blocks: {}".format(blocks))
    # Sanity check: each VoV unit contributes 6 conv layers (5 branch convs + the
    # 1x1 fuse conv) and the initial block contributes 3.
    assert (sum(layers) * 6 + 3 == blocks)
    num_branches = 5
    channels_per_layers = [256, 512, 768, 1024]
    branch_channels_per_layers = [128, 160, 192, 224]
    if slim:
        # The slim variant halves all channel counts.
        channels_per_layers = [ci // 2 for ci in channels_per_layers]
        branch_channels_per_layers = [ci // 2 for ci in branch_channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    branch_channels = [[ci] * li for (ci, li) in zip(branch_channels_per_layers, layers)]
    net = VoVNet(
        channels=channels,
        branch_channels=branch_channels,
        num_branches=num_branches,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def vovnet27s(**kwargs):
    """
    Build the slim 27-layer VoVNet from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time
    Object Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    return get_vovnet(27, slim=True, model_name="vovnet27s", **kwargs)
def vovnet39(**kwargs):
    """
    Build the 39-layer VoVNet from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time
    Object Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    return get_vovnet(39, model_name="vovnet39", **kwargs)
def vovnet57(**kwargs):
    """
    Build the 57-layer VoVNet from 'An Energy and GPU-Computation Efficient Backbone Network for Real-Time
    Object Detection,' https://arxiv.org/abs/1904.09730.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    return get_vovnet(57, model_name="vovnet57", **kwargs)
def _test():
    """Smoke-test the VoVNet variants: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    # Model constructor -> expected number of trainable weights.
    expected_counts = {
        vovnet27s: 3525736,
        vovnet39: 22600296,
        vovnet57: 36640296,
    }

    for model, expected_count in expected_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if (p.shape is not None) and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
| 11,803 | 32.064426 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/crunetb.py | """
CRU-Net(b), implemented in Gluon.
Original paper: 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural Networks,'
https://www.ijcai.org/proceedings/2018/88.
"""
__all__ = ['CRUNetb', 'crunet56b', 'crunet116b']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, pre_conv1x1_block, pre_conv3x3_block
from .resnet import ResInitBlock
from .preresnet import PreResActivation
class CRUConvBlock(HybridBlock):
    """
    CRU-Net specific convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    shared_conv : HybridBlock, default None
        Shared convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups=1,
                 bn_use_global_stats=False,
                 return_preact=False,
                 shared_conv=None,
                 **kwargs):
        super(CRUConvBlock, self).__init__(**kwargs)
        self.return_preact = return_preact
        with self.name_scope():
            # Pre-activation ordering: BN -> ReLU -> conv.
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            if shared_conv is None:
                self.conv = nn.Conv2D(
                    channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    groups=groups,
                    use_bias=False,
                    in_channels=in_channels)
            else:
                # Reuse a convolution owned by another block (collective tensor
                # factorization); only BN/activation are private to this block.
                self.conv = shared_conv
    def hybrid_forward(self, F, x):
        x = self.bn(x)
        x = self.activ(x)
        if self.return_preact:
            # Keep the pre-convolution activation for the caller.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def cru_conv1x1_block(in_channels,
                      out_channels,
                      strides=1,
                      bn_use_global_stats=False,
                      return_preact=False,
                      shared_conv=None):
    """
    Convenience wrapper building a 1x1 CRU-Net convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether to also return the pre-activation tensor.
    shared_conv : HybridBlock, default None
        Convolution layer shared with another block, if any.
    """
    # A 1x1 kernel needs no padding.
    block = CRUConvBlock(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        shared_conv=shared_conv)
    return block
class ResBottleneck(HybridBlock):
    """
    Pre-ResNeXt bottleneck block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(ResBottleneck, self).__init__(**kwargs)
        # Standard ResNeXt width computation: bottleneck uses out_channels/4,
        # scaled by bottleneck_width/64 per group and replicated over groups.
        mid_channels = out_channels // 4
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        with self.name_scope():
            # 1x1 reduce -> grouped 3x3 (carries the stride) -> 1x1 expand,
            # all in pre-activation form.
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv3x3_block(
                in_channels=group_width,
                out_channels=group_width,
                strides=strides,
                groups=cardinality,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class CRUBottleneck(HybridBlock):
    """
    CRU-Net bottleneck block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    group_width: int
        Group width parameter.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    shared_conv1 : HybridBlock, default None
        Shared convolution layer #1.
    shared_conv2 : HybridBlock, default None
        Shared convolution layer #2.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 group_width,
                 bn_use_global_stats,
                 shared_conv1=None,
                 shared_conv2=None,
                 **kwargs):
        super(CRUBottleneck, self).__init__(**kwargs)
        with self.name_scope():
            # 1x1 block whose convolution may be shared across units.
            self.conv1 = cru_conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats,
                shared_conv=shared_conv1)
            if shared_conv2 is None:
                # Depthwise 3x3 (groups == channels) carrying the stride.
                self.conv2 = conv3x3(
                    in_channels=group_width,
                    out_channels=group_width,
                    strides=strides,
                    groups=group_width)
            else:
                self.conv2 = shared_conv2
            self.conv3 = pre_conv1x1_block(
                in_channels=group_width,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats)
            self.conv4 = pre_conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        return x
class ResUnit(HybridBlock):
    """
    CRU-Net residual unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 **kwargs):
        super(ResUnit, self).__init__(**kwargs)
        # The shortcut needs a projection whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                # 1x1 projection matching channels/stride of the body output.
                self.identity_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        return x
class CRUUnit(HybridBlock):
    """
    CRU-Net collective residual unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    group_width: int
        Group width parameter.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    shared_conv1 : HybridBlock, default None
        Shared convolution layer #1.
    shared_conv2 : HybridBlock, default None
        Shared convolution layer #2.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 group_width,
                 bn_use_global_stats,
                 shared_conv1=None,
                 shared_conv2=None,
                 **kwargs):
        super(CRUUnit, self).__init__(**kwargs)
        # Weight sharing is only valid for stride-1 units.
        assert (strides == 1) or ((shared_conv1 is None) and (shared_conv2 is None))
        self.resize_input = (in_channels != out_channels)
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if self.resize_input:
                # Project the input to `out_channels` before the bottleneck body.
                self.input_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats)
            self.body = CRUBottleneck(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=strides,
                group_width=group_width,
                bn_use_global_stats=bn_use_global_stats,
                shared_conv1=shared_conv1,
                shared_conv2=shared_conv2)
            if self.resize_identity:
                # assert (self.input_conv.conv._kwargs["stride"][0] == strides)
                # With strides == 1 the identity projection reuses the input
                # projection's convolution weights (collective factorization);
                # note `input_conv` exists in that case since channels differ.
                self.identity_conv = cru_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    shared_conv=self.input_conv.conv if strides == 1 else None)
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        if self.resize_input:
            x = self.input_conv(x)
        x = self.body(x)
        x = x + identity
        return x
class CRUNetb(HybridBlock):
    """
    CRU-Net(b) model from 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural
    Networks,' https://www.ijcai.org/proceedings/2018/88.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    group_widths: list of int
        List of group width parameters.
    refresh_steps: list of int
        List of refresh step parameters.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 group_widths,
                 refresh_steps,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(CRUNetb, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                group_width = group_widths[i]
                refresh_step = refresh_steps[i]
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # group_width != 0 selects collective (weight-sharing) units
                        # for this stage; otherwise plain ResNeXt units are used.
                        if group_width != 0:
                            # Drop ("refresh") the shared convs at the start of the
                            # stage and, when refresh_step > 0, every refresh_step
                            # units thereafter. j == 0 always satisfies one branch,
                            # so shared_conv1/2 are defined before first use.
                            if ((refresh_step == 0) and (j == 0)) or ((refresh_step != 0) and (j % refresh_step == 0)):
                                shared_conv1 = None
                                shared_conv2 = None
                            # A shared 3x3 conv with stride 2 cannot be reused by a
                            # stride-1 unit; discard it so a fresh one is created.
                            if (shared_conv2 is not None) and (shared_conv2._kwargs["stride"][0] != 1) and\
                                    (strides == 1):
                                shared_conv2 = None
                            unit = CRUUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                group_width=group_width,
                                bn_use_global_stats=bn_use_global_stats,
                                shared_conv1=shared_conv1,
                                shared_conv2=shared_conv2)
                            # Capture the convs just created so later units share them.
                            if shared_conv1 is None:
                                shared_conv1 = unit.body.conv1.conv
                            if shared_conv2 is None:
                                shared_conv2 = unit.body.conv2
                            stage.add(unit)
                        else:
                            stage.add(ResUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                cardinality=cardinality,
                                bottleneck_width=bottleneck_width,
                                bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Final BN+ReLU required by the pre-activation architecture.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # Global 7x7 average pooling assumes the default 224x224 input size.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_crunetb(blocks,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create CRU-Net(b) model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (56 or 116).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    cardinality = 32
    bottleneck_width = 4
    # depth -> (units per stage, group widths per stage, refresh steps per stage)
    configs = {
        56: ([3, 4, 6, 3], [0, 0, 640, 0], [0, 0, 0, 0]),
        116: ([3, 6, 18, 3], [0, 352, 704, 0], [0, 0, 6, 0]),
    }
    if blocks not in configs:
        raise ValueError("Unsupported CRU-Net(b) with number of blocks: {}".format(blocks))
    layers, group_widths, refresh_steps = configs[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = CRUNetb(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        group_widths=group_widths,
        refresh_steps=refresh_steps,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        param_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=param_file, ctx=ctx)
    return net
def crunet56b(**kwargs):
    """
    Build the 56-layer CRU-Net(b) from 'Sharing Residual Units Through Collective Tensor Factorization To Improve
    Deep Neural Networks,' https://www.ijcai.org/proceedings/2018/88.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory with the cached model parameters.
    """
    return get_crunetb(56, model_name="crunet56b", **kwargs)
def crunet116b(**kwargs):
    """
    Build the 116-layer CRU-Net(b) model.

    From 'Sharing Residual Units Through Collective Tensor Factorization To Improve Deep Neural
    Networks,' https://www.ijcai.org/proceedings/2018/88.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Delegate to the shared builder with the 116-block configuration.
    net = get_crunetb(blocks=116, model_name="crunet116b", **kwargs)
    return net
def _test():
    """Smoke-test the CRU-Net(b) models: trainable parameter counts and output shape."""
    import numpy as np
    import mxnet as mx

    def _count_weights(net):
        # Sum sizes of all trainable parameters with known shapes.
        total = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            total += np.prod(param.shape)
        return total

    pretrained = False
    expected_counts = {
        crunet56b: 26139432,
        crunet116b: 44321000,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _count_weights(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 20,687 | 33.138614 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/espnetv2.py | """
ESPNetv2 for ImageNet-1K, implemented in Gluon.
Original paper: 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,'
https://arxiv.org/abs/1811.11431.
"""
__all__ = ['ESPNetv2', 'espnetv2_wd2', 'espnetv2_w1', 'espnetv2_w5d4', 'espnetv2_w3d2', 'espnetv2_w2']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import PReLU2, conv3x3, conv1x1_block, conv3x3_block, DualPathSequential
class PreActivation(HybridBlock):
    """
    PreResNet like pure pre-activation block without convolution layer.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(PreActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            # Project PReLU wrapper (see common.PReLU2).
            self.activ = PReLU2(in_channels=in_channels)
    def hybrid_forward(self, F, x):
        """Apply BatchNorm followed by PReLU; no convolution (pre-activation ordering)."""
        x = self.bn(x)
        x = self.activ(x)
        return x
class ShortcutBlock(HybridBlock):
    """
    ESPNetv2 shortcut block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(ShortcutBlock, self).__init__(**kwargs)
        with self.name_scope():
            # 3x3 conv keeps the channel count and applies PReLU.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=(lambda: PReLU2(in_channels)))
            # 1x1 conv projects to out_channels; no activation here (the caller
            # applies it after the residual addition).
            self.conv2 = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        """Project the shortcut input through 3x3 then 1x1 convolution blocks."""
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class HierarchicalConcurrent(nn.HybridSequential):
    """
    A container for hierarchical concatenation of blocks with parameters.
    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self,
                 axis=1,
                 **kwargs):
        super(HierarchicalConcurrent, self).__init__(**kwargs)
        self.axis = axis
    def hybrid_forward(self, F, x):
        """
        Run every child block on the same input `x`, cumulatively summing each
        output with the previous branch's (already summed) output, and
        concatenate all partial sums along `self.axis`.
        """
        out = []
        y_prev = None
        for block in self._children.values():
            y = block(x)
            if y_prev is not None:
                # Hierarchical fusion: add the previous branch's accumulated output.
                y = y + y_prev
            out.append(y)
            y_prev = y
        out = F.concat(*out, dim=self.axis)
        return out
class ESPBlock(HybridBlock):
    """
    ESPNetv2 block (so-called EESP block).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    dilations : list of int
        Dilation values for branches.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilations,
                 bn_use_global_stats,
                 **kwargs):
        super(ESPBlock, self).__init__(**kwargs)
        num_branches = len(dilations)
        # Each branch produces out_channels // num_branches channels.
        assert (out_channels % num_branches == 0)
        self.downsample = (strides != 1)
        mid_channels = out_channels // num_branches
        with self.name_scope():
            # Grouped 1x1 reduction to the per-branch width.
            self.reduce_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=num_branches,
                bn_use_global_stats=bn_use_global_stats,
                activation=(lambda: PReLU2(mid_channels)))
            # Depthwise dilated 3x3 branches fused hierarchically and concatenated.
            self.branches = HierarchicalConcurrent(axis=1, prefix="")
            for i in range(num_branches):
                self.branches.add(conv3x3(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    padding=dilations[i],  # padding equals dilation, so spatial size is preserved (per stride)
                    dilation=dilations[i],
                    groups=mid_channels))
            # Grouped 1x1 merge without activation.
            self.merge_conv = conv1x1_block(
                in_channels=out_channels,
                out_channels=out_channels,
                groups=num_branches,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            self.preactiv = PreActivation(in_channels=out_channels)
            if not self.downsample:
                self.activ = PReLU2(out_channels)
    def hybrid_forward(self, F, x, x0):
        """Transform `x`; `x0` (raw input path) is passed through unchanged."""
        y = self.reduce_conv(x)
        y = self.branches(y)
        y = self.preactiv(y)
        y = self.merge_conv(y)
        if not self.downsample:
            # Residual connection plus final activation only when resolution is kept.
            y = y + x
            y = self.activ(y)
        return y, x0
class DownsampleBlock(HybridBlock):
    """
    ESPNetv2 downsample block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    x0_channels : int
        Number of input channels for shortcut.
    dilations : list of int
        Dilation values for branches in EESP block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 x0_channels,
                 dilations,
                 bn_use_global_stats,
                 **kwargs):
        super(DownsampleBlock, self).__init__(**kwargs)
        # EESP branch produces only the extra channels; the avg-pool branch
        # contributes the original in_channels, so the concat yields out_channels.
        inc_channels = out_channels - in_channels
        with self.name_scope():
            self.pool = nn.AvgPool2D(
                pool_size=3,
                strides=2,
                padding=1)
            self.eesp = ESPBlock(
                in_channels=in_channels,
                out_channels=inc_channels,
                strides=2,
                dilations=dilations,
                bn_use_global_stats=bn_use_global_stats)
            # Shortcut from the (pooled) raw image path x0.
            self.shortcut_block = ShortcutBlock(
                in_channels=x0_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.activ = PReLU2(out_channels)
    def hybrid_forward(self, F, x, x0):
        """Downsample `x` (pool branch + strided EESP branch) and fuse the pooled raw-image shortcut `x0`."""
        y1 = self.pool(x)
        y2, _ = self.eesp(x, None)
        x = F.concat(y1, y2, dim=1)
        # Keep the raw-image path at the current resolution.
        x0 = self.pool(x0)
        y3 = self.shortcut_block(x0)
        x = x + y3
        x = self.activ(x)
        return x, x0
class ESPInitBlock(HybridBlock):
    """
    ESPNetv2 initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(ESPInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Strided 3x3 stem convolution (halves the resolution).
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                activation=(lambda: PReLU2(out_channels)))
            # Pool applied to the raw-image path x0 to match the new resolution.
            self.pool = nn.AvgPool2D(
                pool_size=3,
                strides=2,
                padding=1)
    def hybrid_forward(self, F, x, x0):
        """Convolve `x` and downsample the raw-image path `x0` in lockstep."""
        x = self.conv(x)
        x0 = self.pool(x0)
        return x, x0
class ESPFinalBlock(HybridBlock):
    """
    ESPNetv2 final block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    final_groups : int
        Number of groups in the last convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 final_groups,
                 bn_use_global_stats,
                 **kwargs):
        super(ESPFinalBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Depthwise 3x3 (groups == channels) followed by grouped 1x1 expansion.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                groups=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=(lambda: PReLU2(in_channels)))
            self.conv2 = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                groups=final_groups,
                bn_use_global_stats=bn_use_global_stats,
                activation=(lambda: PReLU2(out_channels)))
    def hybrid_forward(self, F, x):
        """Apply the depthwise 3x3 then grouped 1x1 convolution blocks."""
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class ESPNetv2(HybridBlock):
    """
    ESPNetv2 model from 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural Network,'
    https://arxiv.org/abs/1811.11431.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    final_block_groups : int
        Number of groups for the final unit.
    dilations : list of list of list of int
        Dilation values for branches in each unit.
    dropout_rate : float, default 0.2
        Parameter of Dropout layer. Faction of the input units to drop.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 final_block_groups,
                 dilations,
                 dropout_rate=0.2,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        # Forward **kwargs (e.g. `prefix`, `params`) to HybridBlock, consistently
        # with every other block class in this module.
        super(ESPNetv2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        x0_channels = in_channels
        with self.name_scope():
            # The feature extractor carries two paths: the processed tensor and the
            # raw (repeatedly pooled) input image, used by downsample shortcuts.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=0,
                last_ordinals=2,
                prefix="")
            self.features.add(ESPInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = DualPathSequential(prefix="stage{}_".format(i + 1))
                for j, out_channels in enumerate(channels_per_stage):
                    if j == 0:
                        # First unit of every stage halves the spatial resolution.
                        unit = DownsampleBlock(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            x0_channels=x0_channels,
                            dilations=dilations[i][j],
                            bn_use_global_stats=bn_use_global_stats)
                    else:
                        unit = ESPBlock(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=1,
                            dilations=dilations[i][j],
                            bn_use_global_stats=bn_use_global_stats)
                    stage.add(unit)
                    in_channels = out_channels
                self.features.add(stage)
            self.features.add(ESPFinalBlock(
                in_channels=in_channels,
                out_channels=final_block_channels,
                final_groups=final_block_groups,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = final_block_channels
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dropout(rate=dropout_rate))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Run the dual-path features (the image is fed as both paths) and the classifier head."""
        x = self.features(x, x)
        x = self.output(x)
        return x
def get_espnetv2(width_scale,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", "models"),
                 **kwargs):
    """
    Create ESPNetv2 model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (width_scale <= 2.0)
    branches = 4
    layers = [1, 4, 8, 4]
    # Maximal dilation for each stage boundary; decreases with depth.
    max_dilation_list = [6, 5, 4, 3, 2]
    # First unit of stage i uses max_dilation_list[i]; the remaining units of
    # the stage use the next (smaller) value.
    max_dilations = [[max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1) for (i, li) in enumerate(layers)]
    # Branch k gets dilation k+1, capped to 1 once k reaches the unit's maximum;
    # the per-unit list is sorted ascending.
    dilations = [[sorted([k + 1 if k < dij else 1 for k in range(branches)]) for dij in di] for di in max_dilations]
    base_channels = 32
    # Scale the base width and round up to a multiple of the branch count.
    weighed_base_channels = math.ceil(float(math.floor(base_channels * width_scale)) / branches) * branches
    channels_per_layers = [weighed_base_channels * pow(2, i + 1) for i in range(len(layers))]
    # Stem width is min(base_channels, weighed_base_channels): never widen the stem.
    init_block_channels = base_channels if weighed_base_channels > base_channels else weighed_base_channels
    final_block_channels = 1024 if width_scale <= 1.5 else 1280
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = ESPNetv2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        final_block_groups=branches,
        dilations=dilations,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ignore_extra=True,
            ctx=ctx)
    return net
def espnetv2_wd2(**kwargs):
    """
    Build the x0.5-width ESPNetv2 model.

    From 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=0.5, model_name="espnetv2_wd2", **kwargs)
    return net
def espnetv2_w1(**kwargs):
    """
    Build the x1.0-width ESPNetv2 model.

    From 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=1.0, model_name="espnetv2_w1", **kwargs)
    return net
def espnetv2_w5d4(**kwargs):
    """
    Build the x1.25-width ESPNetv2 model.

    From 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=1.25, model_name="espnetv2_w5d4", **kwargs)
    return net
def espnetv2_w3d2(**kwargs):
    """
    Build the x1.5-width ESPNetv2 model.

    From 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=1.5, model_name="espnetv2_w3d2", **kwargs)
    return net
def espnetv2_w2(**kwargs):
    """
    Build the x2.0-width ESPNetv2 model.

    From 'ESPNetv2: A Light-weight, Power Efficient, and General Purpose Convolutional Neural
    Network,' https://arxiv.org/abs/1811.11431.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_espnetv2(width_scale=2.0, model_name="espnetv2_w2", **kwargs)
    return net
def _test():
    """Smoke-test the ESPNetv2 models: trainable parameter counts and output shape."""
    import numpy as np
    import mxnet as mx

    def _count_weights(net):
        # Sum sizes of all trainable parameters with known shapes.
        total = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            total += np.prod(param.shape)
        return total

    pretrained = False
    expected_counts = {
        espnetv2_wd2: 1241092,
        espnetv2_w1: 1669592,
        espnetv2_w5d4: 1964832,
        espnetv2_w3d2: 2314120,
        espnetv2_w2: 3497144,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = _count_weights(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 20,193 | 32.600666 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shufflenet.py | """
ShuffleNet for ImageNet-1K, implemented in Gluon.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, depthwise_conv3x3, ChannelShuffle
class ShuffleUnit(HybridBlock):
    """
    ShuffleNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group,
                 **kwargs):
        super(ShuffleUnit, self).__init__(**kwargs)
        self.downsample = downsample
        mid_channels = out_channels // 4
        if downsample:
            # When downsampling, the identity (pooled input) is concatenated,
            # so the residual path only produces the remaining channels.
            out_channels -= in_channels
        with self.name_scope():
            # Grouped 1x1 compression (group conv skipped in the very first unit).
            self.compress_conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=(1 if ignore_group else groups))
            self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels)
            # Shuffle channels so information flows across groups.
            self.c_shuffle = ChannelShuffle(
                channels=mid_channels,
                groups=groups)
            self.dw_conv2 = depthwise_conv3x3(
                channels=mid_channels,
                strides=(2 if self.downsample else 1))
            self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels)
            # Grouped 1x1 expansion back to the residual-path width.
            self.expand_conv3 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                groups=groups)
            self.expand_bn3 = nn.BatchNorm(in_channels=out_channels)
            if downsample:
                self.avgpool = nn.AvgPool2D(
                    pool_size=3,
                    strides=2,
                    padding=1)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        """Residual path (compress -> shuffle -> depthwise -> expand) fused with the identity."""
        identity = x
        x = self.compress_conv1(x)
        x = self.compress_bn1(x)
        x = self.activ(x)
        x = self.c_shuffle(x)
        x = self.dw_conv2(x)
        x = self.dw_bn2(x)
        x = self.expand_conv3(x)
        x = self.expand_bn3(x)
        if self.downsample:
            # Concatenate with the pooled identity (channel count grows).
            identity = self.avgpool(identity)
            x = F.concat(x, identity, dim=1)
        else:
            x = x + identity
        x = self.activ(x)
        return x
class ShuffleInitBlock(HybridBlock):
    """
    ShuffleNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Strided 3x3 stem convolution (halves resolution) ...
            self.conv = conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2)
            self.bn = nn.BatchNorm(in_channels=out_channels)
            self.activ = nn.Activation("relu")
            # ... followed by strided max-pooling (halves resolution again).
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
    def hybrid_forward(self, F, x):
        """Apply conv -> BN -> ReLU -> max-pool."""
        x = self.conv(x)
        x = self.bn(x)
        x = self.activ(x)
        x = self.pool(x)
        return x
class ShuffleNet(HybridBlock):
    """
    ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ShuffleNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ShuffleInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # First unit of every stage downsamples; the very first
                        # unit of the network skips grouping in its 1x1 conv.
                        downsample = (j == 0)
                        ignore_group = (i == 0) and (j == 0)
                        stage.add(ShuffleUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            groups=groups,
                            downsample=downsample,
                            ignore_group=ignore_group))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Run the feature extractor and the classifier head."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_shufflenet(groups,
                   width_scale,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create ShuffleNet model with specific parameters.
    Parameters:
    ----------
    groups : int
        Number of groups in convolution layers. Must be one of 1, 2, 3, 4, 8.
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    Raises:
    ------
    ValueError
        If `groups` is unsupported, or `model_name` is empty while `pretrained` is True.
    """
    init_block_channels = 24
    layers = [4, 8, 4]
    # Per-stage output widths for each supported group count.
    channels_per_layers_map = {
        1: [144, 288, 576],
        2: [200, 400, 800],
        3: [240, 480, 960],
        4: [272, 544, 1088],
        8: [384, 768, 1536],
    }
    if groups not in channels_per_layers_map:
        raise ValueError("The number of groups {} is not supported".format(groups))
    channels_per_layers = channels_per_layers_map[groups]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Uniformly thin every unit and the stem.
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
    net = ShuffleNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def shufflenet_g1_w1(**kwargs):
    """
    Build the width-1.0 ShuffleNet with one convolution group.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=1.0, model_name="shufflenet_g1_w1", **kwargs)
    return net
def shufflenet_g2_w1(**kwargs):
    """
    Build the width-1.0 ShuffleNet with two convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=2, width_scale=1.0, model_name="shufflenet_g2_w1", **kwargs)
    return net
def shufflenet_g3_w1(**kwargs):
    """
    Build the width-1.0 ShuffleNet with three convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=1.0, model_name="shufflenet_g3_w1", **kwargs)
    return net
def shufflenet_g4_w1(**kwargs):
    """
    Build the width-1.0 ShuffleNet with four convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=4, width_scale=1.0, model_name="shufflenet_g4_w1", **kwargs)
    return net
def shufflenet_g8_w1(**kwargs):
    """
    Build the width-1.0 ShuffleNet with eight convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=8, width_scale=1.0, model_name="shufflenet_g8_w1", **kwargs)
    return net
def shufflenet_g1_w3d4(**kwargs):
    """
    Build the width-0.75 ShuffleNet with one convolution group.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.75, model_name="shufflenet_g1_w3d4", **kwargs)
    return net
def shufflenet_g3_w3d4(**kwargs):
    """
    Build the width-0.75 ShuffleNet with three convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.75, model_name="shufflenet_g3_w3d4", **kwargs)
    return net
def shufflenet_g1_wd2(**kwargs):
    """
    Build the width-0.5 ShuffleNet with one convolution group.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.5, model_name="shufflenet_g1_wd2", **kwargs)
    return net
def shufflenet_g3_wd2(**kwargs):
    """
    Build the width-0.5 ShuffleNet with three convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.5, model_name="shufflenet_g3_wd2", **kwargs)
    return net
def shufflenet_g1_wd4(**kwargs):
    """
    Build the width-0.25 ShuffleNet with one convolution group.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=1, width_scale=0.25, model_name="shufflenet_g1_wd4", **kwargs)
    return net
def shufflenet_g3_wd4(**kwargs):
    """
    Build the width-0.25 ShuffleNet with three convolution groups.

    From 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_shufflenet(groups=3, width_scale=0.25, model_name="shufflenet_g3_wd4", **kwargs)
    return net
def _test():
    """Smoke-test the ShuffleNet models: trainable parameter counts and output shape."""
    import numpy as np
    import mxnet as mx

    def _count_weights(net):
        # Sum sizes of all trainable parameters with known shapes.
        total = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            total += np.prod(param.shape)
        return total

    pretrained = False
    expected_counts = {
        shufflenet_g1_w1: 1531936,
        shufflenet_g2_w1: 1733848,
        shufflenet_g3_w1: 1865728,
        shufflenet_g4_w1: 1968344,
        shufflenet_g8_w1: 2434768,
        shufflenet_g1_w3d4: 975214,
        shufflenet_g3_w3d4: 1238266,
        shufflenet_g1_wd2: 534484,
        shufflenet_g3_wd2: 718324,
        shufflenet_g1_wd4: 209746,
        shufflenet_g3_wd4: 305902,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = _count_weights(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 17,247 | 33.222222 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/bamresnet.py | """
BAM-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.
"""
__all__ = ['BamResNet', 'bam_resnet18', 'bam_resnet34', 'bam_resnet50', 'bam_resnet101', 'bam_resnet152']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnet import ResInitBlock, ResUnit
class DenseBlock(HybridBlock):
    """
    Standard dense block with Batch normalization and ReLU activation.
    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(DenseBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Fully-connected layer followed by 1D BatchNorm and ReLU.
            self.fc = nn.Dense(
                units=out_channels,
                in_units=in_channels)
            self.bn = nn.BatchNorm(
                in_channels=out_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        """Apply FC -> BN -> ReLU."""
        x = self.fc(x)
        x = self.bn(x)
        x = self.activ(x)
        return x
class ChannelGate(HybridBlock):
    """
    BAM channel attention gate.

    Squeezes the spatial dimensions with global average pooling, runs the channel
    descriptor through a bottleneck MLP, and broadcasts the per-channel logits
    back to the input's spatial resolution.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    num_layers : int, default 1
        Number of dense blocks.
    """
    def __init__(self,
                 channels,
                 bn_use_global_stats,
                 reduction_ratio=16,
                 num_layers=1,
                 **kwargs):
        super(ChannelGate, self).__init__(**kwargs)
        mid_channels = channels // reduction_ratio

        with self.name_scope():
            self.pool = nn.GlobalAvgPool2D()
            self.flatten = nn.Flatten()
            self.init_fc = DenseBlock(
                in_channels=channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.main_fcs = nn.HybridSequential(prefix="")
            for _ in range(num_layers - 1):
                self.main_fcs.add(DenseBlock(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    bn_use_global_stats=bn_use_global_stats))
            self.final_fc = nn.Dense(
                units=channels,
                in_units=mid_channels)

    def hybrid_forward(self, F, x):
        identity = x
        # Squeeze spatial dimensions down to a per-channel descriptor.
        w = self.flatten(self.pool(x))
        # Bottleneck MLP producing per-channel attention logits.
        w = self.final_fc(self.main_fcs(self.init_fc(w)))
        # Restore (N, C, 1, 1) shape and broadcast over the spatial grid.
        w = w.expand_dims(2).expand_dims(3).broadcast_like(identity)
        return w
class SpatialGate(HybridBlock):
    """
    BAM spatial attention gate.

    Reduces channels with a 1x1 convolution, enlarges the receptive field with a
    stack of dilated 3x3 convolutions, projects to a single-channel map and
    broadcasts it back to the input's shape.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    reduction_ratio : int, default 16
        Channel reduction ratio.
    num_dil_convs : int, default 2
        Number of dilated convolutions.
    dilation : int, default 4
        Dilation/padding value for corresponding convolutions.
    """
    def __init__(self,
                 channels,
                 bn_use_global_stats,
                 reduction_ratio=16,
                 num_dil_convs=2,
                 dilation=4,
                 **kwargs):
        super(SpatialGate, self).__init__(**kwargs)
        mid_channels = channels // reduction_ratio

        with self.name_scope():
            self.init_conv = conv1x1_block(
                in_channels=channels,
                out_channels=mid_channels,
                strides=1,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats)
            self.dil_convs = nn.HybridSequential(prefix="")
            for _ in range(num_dil_convs):
                self.dil_convs.add(conv3x3_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    strides=1,
                    padding=dilation,
                    dilation=dilation,
                    use_bias=True,
                    bn_use_global_stats=bn_use_global_stats))
            self.final_conv = conv1x1(
                in_channels=mid_channels,
                out_channels=1,
                strides=1,
                use_bias=True)

    def hybrid_forward(self, F, x):
        identity = x
        w = self.init_conv(x)
        w = self.dil_convs(w)
        # Single-channel spatial map, broadcast across all input channels.
        w = self.final_conv(w)
        return w.broadcast_like(identity)
class BamBlock(HybridBlock):
    """
    BAM attention block for BAM-ResNet.

    Multiplies the channel and spatial gate outputs, squashes them through a
    sigmoid, and rescales the input by (1 + attention).

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 channels,
                 bn_use_global_stats,
                 **kwargs):
        super(BamBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.ch_att = ChannelGate(
                channels=channels,
                bn_use_global_stats=bn_use_global_stats)
            self.sp_att = SpatialGate(
                channels=channels,
                bn_use_global_stats=bn_use_global_stats)
            self.sigmoid = nn.Activation("sigmoid")

    def hybrid_forward(self, F, x):
        # Fuse both gates into a mask in (0, 1); scaling by (1 + mask)
        # keeps the identity signal when the mask is near zero.
        gate = self.sigmoid(self.ch_att(x) * self.sp_att(x))
        return x * (1 + gate)
class BamResUnit(HybridBlock):
    """
    BAM-ResNet unit: an ordinary residual unit, optionally preceded by a BAM
    attention block whenever the unit downsamples (strides != 1).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 **kwargs):
        super(BamResUnit, self).__init__(**kwargs)
        # Attention is applied only at stage transitions (stride-2 units).
        self.use_bam = strides != 1

        with self.name_scope():
            if self.use_bam:
                self.bam = BamBlock(
                    channels=in_channels,
                    bn_use_global_stats=bn_use_global_stats)
            self.res_unit = ResUnit(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                bottleneck=bottleneck,
                conv1_stride=False)

    def hybrid_forward(self, F, x):
        if self.use_bam:
            x = self.bam(x)
        return self.res_unit(x)
class BamResNet(HybridBlock):
    """
    BAM-ResNet model from 'BAM: Bottleneck Attention Module,' https://arxiv.org/abs/1807.06514.

    A standard ResNet layout in which each downsampling unit is a BamResUnit,
    i.e. a BAM attention block runs before the residual unit at every stage
    transition.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(BamResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 7x7 conv + max-pool (from .resnet), reduces resolution by 4x.
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except stage 1;
                        # BamResUnit inserts the BAM block exactly on these units.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(BamResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck))
                        in_channels = out_channels
                self.features.add(stage)
            # 7x7 average pool collapses the final feature map (for 224x224 input).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resnet(blocks,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create BAM-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks. Supported values: 18, 34, 50, 101, 152.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Number of residual units per stage for each supported depth.
    if blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    else:
        raise ValueError("Unsupported BAM-ResNet with number of blocks: {}".format(blocks))
    init_block_channels = 64
    # Shallow nets (<50) use basic residual blocks; deeper ones use bottlenecks
    # whose stage outputs are 4x wider.
    if blocks < 50:
        channels_per_layers = [64, 128, 256, 512]
        bottleneck = False
    else:
        channels_per_layers = [256, 512, 1024, 2048]
        bottleneck = True
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = BamResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def bam_resnet18(**kwargs):
    """
    Build the 18-layer BAM-ResNet from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="bam_resnet18", blocks=18, **kwargs)
def bam_resnet34(**kwargs):
    """
    Build the 34-layer BAM-ResNet from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="bam_resnet34", blocks=34, **kwargs)
def bam_resnet50(**kwargs):
    """
    Build the 50-layer BAM-ResNet from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="bam_resnet50", blocks=50, **kwargs)
def bam_resnet101(**kwargs):
    """
    Build the 101-layer BAM-ResNet from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="bam_resnet101", blocks=101, **kwargs)
def bam_resnet152(**kwargs):
    """
    Build the 152-layer BAM-ResNet from 'BAM: Bottleneck Attention Module,'
    https://arxiv.org/abs/1807.06514.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnet(model_name="bam_resnet152", blocks=152, **kwargs)
def _test():
    """Smoke test: check parameter counts and output shape of every variant."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected number of trainable weights for each model variant.
    expected_counts = {
        bam_resnet18: 11712503,
        bam_resnet34: 21820663,
        bam_resnet50: 25915099,
        bam_resnet101: 44907227,
        bam_resnet152: 60550875,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 16,255 | 31.253968 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resattnet.py | """
ResAttNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904.
"""
__all__ = ['ResAttNet', 'resattnet56', 'resattnet92', 'resattnet128', 'resattnet164', 'resattnet200', 'resattnet236',
'resattnet452']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv7x7_block, pre_conv1x1_block, pre_conv3x3_block, Hourglass
class PreResBottleneck(HybridBlock):
    """
    Pre-activated bottleneck (1x1 -> 3x3 -> 1x1) for the residual path of a
    PreResNet unit. Also returns the pre-activation of the first convolution so
    the caller can build the identity projection from it.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(PreResBottleneck, self).__init__(**kwargs)
        # Classic 4x bottleneck reduction.
        mid_channels = out_channels // 4

        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                return_preact=True)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                strides=strides)
            self.conv3 = pre_conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x, x_pre_activ = self.conv1(x)
        x = self.conv3(self.conv2(x))
        return x, x_pre_activ
class ResBlock(HybridBlock):
    """
    Pre-activated residual block. When the shape changes, the identity branch is
    projected with a 1x1 convolution applied to the body's pre-activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 bn_use_global_stats=False,
                 **kwargs):
        super(ResBlock, self).__init__(**kwargs)
        # Projection needed whenever channel count or resolution changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            self.body = PreResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides)

    def hybrid_forward(self, F, x):
        residual, x_pre_activ = self.body(x)
        if self.resize_identity:
            identity = self.identity_conv(x_pre_activ)
        else:
            identity = x
        return residual + identity
class InterpolationBlock(HybridBlock):
    """
    Bilinear upsampling to a fixed spatial size.

    Parameters:
    ----------
    size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 size,
                 **kwargs):
        super(InterpolationBlock, self).__init__(**kwargs)
        self.size = size

    def hybrid_forward(self, F, x):
        height, width = self.size
        return F.contrib.BilinearResize2D(x, height=height, width=width)
class DoubleSkipBlock(HybridBlock):
    """
    Skip-connection block: adds the output of one residual block back onto the
    input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(DoubleSkipBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.skip1 = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        return x + self.skip1(x)
class ResBlockSequence(HybridBlock):
    """
    A chain of `length` pre-activated residual blocks with identical channel
    configuration.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    length : int
        Length of sequence.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 length,
                 bn_use_global_stats,
                 **kwargs):
        super(ResBlockSequence, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for _ in range(length):
                self.blocks.add(ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))

    def hybrid_forward(self, F, x):
        return self.blocks(x)
class DownAttBlock(HybridBlock):
    """
    Downscaling sub-block for the hourglass inside an attention block: a 3x3
    stride-2 max-pool followed by a sequence of residual blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    length : int
        Length of residual blocks list.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 length,
                 bn_use_global_stats,
                 **kwargs):
        super(DownAttBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
            self.res_blocks = ResBlockSequence(
                in_channels=in_channels,
                out_channels=out_channels,
                length=length,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        return self.res_blocks(self.pool(x))
class UpAttBlock(HybridBlock):
    """
    Upscaling sub-block for the hourglass inside an attention block: a sequence
    of residual blocks followed by bilinear upsampling to a fixed size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    length : int
        Length of residual blocks list.
    size : tuple of 2 int
        Spatial size of the output tensor for the bilinear upsampling operation.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 length,
                 size,
                 bn_use_global_stats,
                 **kwargs):
        super(UpAttBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.res_blocks = ResBlockSequence(
                in_channels=in_channels,
                out_channels=out_channels,
                length=length,
                bn_use_global_stats=bn_use_global_stats)
            self.upsample = InterpolationBlock(size)

    def hybrid_forward(self, F, x):
        return self.upsample(self.res_blocks(x))
class MiddleAttBlock(HybridBlock):
    """
    Mask head of the attention block: two pre-activated 1x1 convolutions
    followed by a sigmoid, producing a soft attention mask.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 channels,
                 bn_use_global_stats,
                 **kwargs):
        super(MiddleAttBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=channels,
                out_channels=channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv1x1_block(
                in_channels=channels,
                out_channels=channels,
                bn_use_global_stats=bn_use_global_stats)
            self.sigmoid = nn.Activation("sigmoid")

    def hybrid_forward(self, F, x):
        return self.sigmoid(self.conv2(self.conv1(x)))
class AttBlock(HybridBlock):
    """
    Attention block (soft-mask branch built as an hourglass over a trunk branch).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    hourglass_depth : int
        Depth of hourglass block.
    att_scales : list of int
        Attention block specific scales: (p, t, r) — p residual blocks before
        the hourglass, t blocks on the trunk (first skip), r blocks per
        hourglass down/up level.
    in_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 hourglass_depth,
                 att_scales,
                 in_size,
                 bn_use_global_stats,
                 **kwargs):
        super(AttBlock, self).__init__(**kwargs)
        assert (len(att_scales) == 3)
        scale_factor = 2
        scale_p, scale_t, scale_r = att_scales
        with self.name_scope():
            # scale_p residual blocks applied before entering the hourglass.
            self.init_blocks = ResBlockSequence(
                in_channels=in_channels,
                out_channels=out_channels,
                length=scale_p,
                bn_use_global_stats=bn_use_global_stats)
            down_seq = nn.HybridSequential(prefix="")
            up_seq = nn.HybridSequential(prefix="")
            skip_seq = nn.HybridSequential(prefix="")
            for i in range(hourglass_depth):
                # Each level halves the resolution on the way down (max-pool in
                # DownAttBlock) ...
                down_seq.add(DownAttBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    length=scale_r,
                    bn_use_global_stats=bn_use_global_stats))
                # ... and the matching up block restores the size that entered
                # this level: `in_size` is captured here BEFORE being halved
                # below, so the ordering of these statements is significant.
                up_seq.add(UpAttBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    length=scale_r,
                    size=in_size,
                    bn_use_global_stats=bn_use_global_stats))
                in_size = tuple([x // scale_factor for x in in_size])
                if i == 0:
                    # First skip is the trunk branch (scale_t residual blocks);
                    # Hourglass(return_first_skip=True) returns it separately.
                    skip_seq.add(ResBlockSequence(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        length=scale_t,
                        bn_use_global_stats=bn_use_global_stats))
                else:
                    skip_seq.add(DoubleSkipBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        bn_use_global_stats=bn_use_global_stats))
            self.hg = Hourglass(
                down_seq=down_seq,
                up_seq=up_seq,
                skip_seq=skip_seq,
                return_first_skip=True)
            self.middle_block = MiddleAttBlock(
                channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.final_block = ResBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.init_blocks(x)
        # x: hourglass (mask) output; y: trunk output (first skip).
        x, y = self.hg(x)
        x = self.middle_block(x)
        # Residual attention: trunk modulated by (1 + sigmoid mask).
        x = (1 + x) * y
        x = self.final_block(x)
        return x
class ResAttInitBlock(HybridBlock):
    """
    ResAttNet stem: 7x7 stride-2 convolution followed by a 3x3 stride-2
    max-pool (4x total downsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(ResAttInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = conv7x7_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)

    def hybrid_forward(self, F, x):
        return self.pool(self.conv(x))
class PreActivation(HybridBlock):
    """
    Stand-alone pre-activation (BatchNorm + ReLU) without a convolution; used as
    the final block of the feature extractor.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(PreActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        return self.activ(self.bn(x))
class ResAttNet(HybridBlock):
    """
    ResAttNet model from 'Residual Attention Network for Image Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    attentions : list of list of int
        Whether to use a attention unit or residual one.
    att_scales : list of int
        Attention block specific scales.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 attentions,
                 att_scales,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ResAttNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResAttInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # Stem downsamples by 4x; `in_size` tracks the running spatial
            # resolution needed by the bilinear upsampling inside AttBlock.
            in_size = tuple([x // 4 for x in in_size])
            for i, channels_per_stage in enumerate(channels):
                # Deeper stages have smaller feature maps, so their hourglass
                # can have fewer levels.
                hourglass_depth = len(channels) - 1 - i
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        strides = 2 if (j == 0) and (i != 0) else 1
                        if attentions[i][j]:
                            # NOTE: with the configs built by get_resattnet,
                            # attention units appear only at j >= 1, so
                            # strides == 1 here and the unit never downsamples.
                            stage.add(AttBlock(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                hourglass_depth=hourglass_depth,
                                att_scales=att_scales,
                                in_size=in_size,
                                bn_use_global_stats=bn_use_global_stats))
                        else:
                            stage.add(ResBlock(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                        in_size = tuple([x // strides for x in in_size])
                self.features.add(stage)
            self.features.add(PreActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # 7x7 average pool collapses the final feature map (224x224 input).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resattnet(blocks,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create ResAttNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # blocks -> (attention units per stage, (p, t, r) scales).
    configs = {
        56: ([1, 1, 1], [1, 2, 1]),
        92: ([1, 2, 3], [1, 2, 1]),
        128: ([2, 3, 4], [1, 2, 1]),
        164: ([3, 4, 5], [1, 2, 1]),
        200: ([4, 5, 6], [1, 2, 1]),
        236: ([5, 6, 7], [1, 2, 1]),
        452: ([5, 6, 7], [2, 4, 3]),
    }
    if blocks not in configs:
        raise ValueError("Unsupported ResAttNet with number of blocks: {}".format(blocks))
    att_layers, att_scales = configs[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    layers = att_layers + [2]
    # Each stage opens with a plain residual unit, then its attention units;
    # the last stage is purely residual.
    channels = [[ci] * (li + 1) for (ci, li) in zip(channels_per_layers, layers)]
    attentions = [[0] + [1] * li for li in att_layers] + [[0] * 3]

    net = ResAttNet(
        channels=channels,
        init_block_channels=init_block_channels,
        attentions=attentions,
        att_scales=att_scales,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def resattnet56(**kwargs):
    """
    Build ResAttNet-56 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet56", blocks=56, **kwargs)
def resattnet92(**kwargs):
    """
    Build ResAttNet-92 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet92", blocks=92, **kwargs)
def resattnet128(**kwargs):
    """
    Build ResAttNet-128 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet128", blocks=128, **kwargs)
def resattnet164(**kwargs):
    """
    Build ResAttNet-164 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet164", blocks=164, **kwargs)
def resattnet200(**kwargs):
    """
    Build ResAttNet-200 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet200", blocks=200, **kwargs)
def resattnet236(**kwargs):
    """
    Build ResAttNet-236 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet236", blocks=236, **kwargs)
def resattnet452(**kwargs):
    """
    Build ResAttNet-452 from 'Residual Attention Network for Image
    Classification,' https://arxiv.org/abs/1704.06904.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resattnet(model_name="resattnet452", blocks=452, **kwargs)
def _test():
    """Smoke test: check parameter counts and output shape of every variant."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected number of trainable weights for each model variant.
    expected_counts = {
        resattnet56: 31810728,
        resattnet92: 52466344,
        resattnet128: 65294504,
        resattnet164: 78122664,
        resattnet200: 90950824,
        resattnet236: 103778984,
        resattnet452: 182285224,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
_test()
| 25,649 | 32.096774 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/centernet.py | """
CenterNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Objects as Points,' https://arxiv.org/abs/1904.07850.
"""
__all__ = ['CenterNet', 'centernet_resnet18_voc', 'centernet_resnet18_coco', 'centernet_resnet50b_voc',
'centernet_resnet50b_coco', 'centernet_resnet101b_voc', 'centernet_resnet101b_coco',
'CenterNetHeatmapMaxDet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv3x3_block, DeconvBlock
from .resnet import resnet18, resnet50b, resnet101b
class CenterNetDecoderUnit(HybridBlock):
    """
    CenterNet decoder unit: a 3x3 convolution followed by a stride-2 transposed
    convolution (2x spatial upsampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(CenterNetDecoderUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats)
            self.deconv = DeconvBlock(
                in_channels=out_channels,
                out_channels=out_channels,
                kernel_size=4,
                strides=2,
                padding=1,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        return self.deconv(self.conv(x))
class CenterNetHeadBlock(HybridBlock):
"""
CenterNet simple head block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
"""
def __init__(self,
in_channels,
out_channels,
**kwargs):
super(CenterNetHeadBlock, self).__init__(**kwargs)
with self.name_scope():
self.conv1 = conv3x3_block(
in_channels=in_channels,
out_channels=in_channels,
use_bias=True,
use_bn=False)
self.conv2 = conv1x1(
in_channels=in_channels,
out_channels=out_channels,
use_bias=True)
def hybrid_forward(self, F, x):
x = self.conv1(x)
x = self.conv2(x)
return x
class CenterNetHeatmapBlock(HybridBlock):
"""
CenterNet heatmap block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
do_nms : bool
Whether do NMS (or simply clip for training otherwise).
"""
def __init__(self,
in_channels,
out_channels,
do_nms,
**kwargs):
super(CenterNetHeatmapBlock, self).__init__(**kwargs)
self.do_nms = do_nms
with self.name_scope():
self.head = CenterNetHeadBlock(
in_channels=in_channels,
out_channels=out_channels)
self.sigmoid = nn.Activation("sigmoid")
if self.do_nms:
self.pool = nn.MaxPool2D(
pool_size=3,
strides=1,
padding=1)
def hybrid_forward(self, F, x):
x = self.head(x)
x = self.sigmoid(x)
if self.do_nms:
y = self.pool(x)
x = x * F.broadcast_equal(y, x)
else:
eps = 1e-4
x = x.clip(a_min=eps, a_max=(1.0 - eps))
return x
class CenterNetHeatmapMaxDet(HybridBlock):
"""
CenterNet decoder for heads (heatmap, wh, reg).
Parameters:
----------
topk : int, default 40
Keep only `topk` detections.
scale : int, default is 4
Downsampling scale factor.
max_batch : int, default is 256
Maximal batch size.
"""
def __init__(self,
topk=40,
scale=4,
max_batch=256,
**kwargs):
super(CenterNetHeatmapMaxDet, self).__init__(**kwargs)
self.topk = topk
self.scale = scale
self.max_batch = max_batch
def hybrid_forward(self, F, x):
heatmap = x.slice_axis(axis=1, begin=0, end=-4)
wh = x.slice_axis(axis=1, begin=-4, end=-2)
reg = x.slice_axis(axis=1, begin=-2, end=None)
_, _, out_h, out_w = heatmap.shape_array().split(num_outputs=4, axis=0)
scores, indices = heatmap.reshape((0, -1)).topk(k=self.topk, ret_typ="both")
indices = indices.astype(dtype="int64")
topk_classes = F.broadcast_div(indices, (out_h * out_w)).astype(dtype="float32")
topk_indices = F.broadcast_mod(indices, (out_h * out_w))
topk_ys = F.broadcast_div(topk_indices, out_w).astype(dtype="float32")
topk_xs = F.broadcast_mod(topk_indices, out_w).astype(dtype="float32")
center = reg.transpose((0, 2, 3, 1)).reshape((0, -1, 2))
wh = wh.transpose((0, 2, 3, 1)).reshape((0, -1, 2))
batch_indices = F.arange(self.max_batch).slice_like(center, axes=0).expand_dims(-1).repeat(self.topk, 1).\
astype(dtype="int64")
reg_xs_indices = F.zeros_like(batch_indices, dtype="int64")
reg_ys_indices = F.ones_like(batch_indices, dtype="int64")
reg_xs = F.concat(batch_indices, topk_indices, reg_xs_indices, dim=0).reshape((3, -1))
reg_ys = F.concat(batch_indices, topk_indices, reg_ys_indices, dim=0).reshape((3, -1))
xs = F.gather_nd(center, reg_xs).reshape((-1, self.topk))
ys = F.gather_nd(center, reg_ys).reshape((-1, self.topk))
topk_xs = topk_xs + xs
topk_ys = topk_ys + ys
w = F.gather_nd(wh, reg_xs).reshape((-1, self.topk))
h = F.gather_nd(wh, reg_ys).reshape((-1, self.topk))
half_w = 0.5 * w
half_h = 0.5 * h
bboxes = F.stack(topk_xs - half_w, topk_ys - half_h, topk_xs + half_w, topk_ys + half_h, axis=-1)
bboxes = bboxes * self.scale
topk_classes = topk_classes.expand_dims(axis=-1)
scores = scores.expand_dims(axis=-1)
result = F.concat(bboxes, topk_classes, scores, dim=-1)
return result
def __repr__(self):
s = "{name}(topk={topk}, scale={scale})"
return s.format(
name=self.__class__.__name__,
topk=self.topk,
scale=self.scale)
def calc_flops(self, x):
assert (x.shape[0] == 1)
num_flops = 10 * x.size
num_macs = 0
return num_flops, num_macs
class CenterNet(HybridBlock):
"""
CenterNet model from 'Objects as Points,' https://arxiv.org/abs/1904.07850.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int
Number of output channels for the backbone.
channels : list of int
Number of output channels for each decoder unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
return_heatmap : bool, default False
Whether to return only heatmap.
topk : int, default 40
Keep only `topk` detections.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (512, 512)
Spatial size of the expected input image.
classes : int, default 80
Number of classification classes.
"""
def __init__(self,
backbone,
backbone_out_channels,
channels,
bn_use_global_stats=False,
return_heatmap=False,
topk=40,
in_channels=3,
in_size=(512, 512),
classes=80,
**kwargs):
super(CenterNet, self).__init__(**kwargs)
self.in_size = in_size
self.in_channels = in_channels
self.return_heatmap = return_heatmap
with self.name_scope():
self.backbone = backbone
self.decoder = nn.HybridSequential(prefix="")
in_channels = backbone_out_channels
for out_channels in channels:
self.decoder.add(CenterNetDecoderUnit(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats))
in_channels = out_channels
heads = HybridConcurrent(axis=1, prefix="")
heads.add(CenterNetHeatmapBlock(
in_channels=in_channels,
out_channels=classes,
do_nms=(not self.return_heatmap)))
heads.add(CenterNetHeadBlock(
in_channels=in_channels,
out_channels=2))
heads.add(CenterNetHeadBlock(
in_channels=in_channels,
out_channels=2))
self.decoder.add(heads)
if not self.return_heatmap:
self.heatmap_max_det = CenterNetHeatmapMaxDet(
topk=topk,
scale=4)
def hybrid_forward(self, F, x):
x = self.backbone(x)
x = self.decoder(x)
if not self.return_heatmap:
x = self.heatmap_max_det(x)
return x
def get_centernet(backbone,
backbone_out_channels,
classes,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create CenterNet model with specific parameters.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
backbone_out_channels : int
Number of output channels for the backbone.
classes : int
Number of classes.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns:
-------
HybridBlock
A network.
"""
channels = [256, 128, 64]
net = CenterNet(
backbone=backbone,
backbone_out_channels=backbone_out_channels,
channels=channels,
classes=classes,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def centernet_resnet18_voc(pretrained_backbone=False, classes=20, **kwargs):
"""
CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 20
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet18(pretrained=pretrained_backbone).features[:-1]
return get_centernet(backbone=backbone, backbone_out_channels=512, classes=classes,
model_name="centernet_resnet18_voc", **kwargs)
def centernet_resnet18_coco(pretrained_backbone=False, classes=80, **kwargs):
"""
CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 80
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet18(pretrained=pretrained_backbone).features[:-1]
return get_centernet(backbone=backbone, backbone_out_channels=512, classes=classes,
model_name="centernet_resnet18_coco", **kwargs)
def centernet_resnet50b_voc(pretrained_backbone=False, classes=20, **kwargs):
"""
CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 20
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet50b(pretrained=pretrained_backbone).features[:-1]
return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes,
model_name="centernet_resnet50b_voc", **kwargs)
def centernet_resnet50b_coco(pretrained_backbone=False, classes=80, **kwargs):
"""
CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 80
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet50b(pretrained=pretrained_backbone).features[:-1]
return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes,
model_name="centernet_resnet50b_coco", **kwargs)
def centernet_resnet101b_voc(pretrained_backbone=False, classes=20, **kwargs):
"""
CenterNet model on the base of ResNet-101b for VOC Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 20
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet101b(pretrained=pretrained_backbone).features[:-1]
return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes,
model_name="centernet_resnet101b_voc", **kwargs)
def centernet_resnet101b_coco(pretrained_backbone=False, classes=80, **kwargs):
"""
CenterNet model on the base of ResNet-101b for COCO Detection from 'Objects as Points,'
https://arxiv.org/abs/1904.07850.
Parameters:
----------
pretrained_backbone : bool, default False
Whether to load the pretrained weights for feature extractor.
classes : int, default 80
Number of classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
backbone = resnet101b(pretrained=pretrained_backbone).features[:-1]
return get_centernet(backbone=backbone, backbone_out_channels=2048, classes=classes,
model_name="centernet_resnet101b_coco", **kwargs)
def _test():
import numpy as np
import mxnet as mx
in_size = (512, 512)
topk = 40
return_heatmap = False
pretrained = False
models = [
(centernet_resnet18_voc, 20),
(centernet_resnet18_coco, 80),
(centernet_resnet50b_voc, 20),
(centernet_resnet50b_coco, 80),
(centernet_resnet101b_voc, 20),
(centernet_resnet101b_coco, 80),
]
for model, classes in models:
net = model(pretrained=pretrained, topk=topk, in_size=in_size, return_heatmap=return_heatmap)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != centernet_resnet18_voc or weight_count == 14215640)
assert (model != centernet_resnet18_coco or weight_count == 14219540)
assert (model != centernet_resnet50b_voc or weight_count == 30086104)
assert (model != centernet_resnet50b_coco or weight_count == 30090004)
assert (model != centernet_resnet101b_voc or weight_count == 49078232)
assert (model != centernet_resnet101b_coco or weight_count == 49082132)
batch = 14
x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
y = net(x)
assert (y.shape[0] == batch)
if return_heatmap:
assert (y.shape[1] == classes + 4) and (y.shape[2] == x.shape[2] // 4) and (y.shape[3] == x.shape[3] // 4)
else:
assert (y.shape[1] == topk) and (y.shape[2] == 6)
if __name__ == "__main__":
_test()
| 18,867 | 34.466165 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/xdensenet_cifar.py | """
X-DenseNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
"""
__all__ = ['CIFARXDenseNet', 'xdensenet40_2_k24_bc_cifar10', 'xdensenet40_2_k24_bc_cifar100',
'xdensenet40_2_k24_bc_svhn', 'xdensenet40_2_k36_bc_cifar10', 'xdensenet40_2_k36_bc_cifar100',
'xdensenet40_2_k36_bc_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3
from .preresnet import PreResActivation
from .densenet import TransitionBlock
from .xdensenet import pre_xconv3x3_block, XDenseUnit
class XDenseSimpleUnit(HybridBlock):
"""
X-DenseNet simple unit for CIFAR.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
dropout_rate : float
Parameter of Dropout layer. Faction of the input units to drop.
expand_ratio : int
Ratio of expansion.
"""
def __init__(self,
in_channels,
out_channels,
bn_use_global_stats,
dropout_rate,
expand_ratio,
**kwargs):
super(XDenseSimpleUnit, self).__init__(**kwargs)
self.use_dropout = (dropout_rate != 0.0)
inc_channels = out_channels - in_channels
with self.name_scope():
self.conv = pre_xconv3x3_block(
in_channels=in_channels,
out_channels=inc_channels,
bn_use_global_stats=bn_use_global_stats,
expand_ratio=expand_ratio)
if self.use_dropout:
self.dropout = nn.Dropout(rate=dropout_rate)
def hybrid_forward(self, F, x):
identity = x
x = self.conv(x)
if self.use_dropout:
x = self.dropout(x)
x = F.concat(identity, x, dim=1)
return x
class CIFARXDenseNet(HybridBlock):
"""
X-DenseNet model for CIFAR from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'
https://arxiv.org/abs/1711.08757.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
dropout_rate : float, default 0.0
Parameter of Dropout layer. Faction of the input units to drop.
expand_ratio : int, default 2
Ratio of expansion.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
bn_use_global_stats=False,
dropout_rate=0.0,
expand_ratio=2,
in_channels=3,
in_size=(32, 32),
classes=10,
**kwargs):
super(CIFARXDenseNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
unit_class = XDenseUnit if bottleneck else XDenseSimpleUnit
with self.name_scope():
self.features = nn.HybridSequential(prefix="")
self.features.add(conv3x3(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
with stage.name_scope():
if i != 0:
stage.add(TransitionBlock(
in_channels=in_channels,
out_channels=(in_channels // 2),
bn_use_global_stats=bn_use_global_stats))
in_channels = in_channels // 2
for j, out_channels in enumerate(channels_per_stage):
stage.add(unit_class(
in_channels=in_channels,
out_channels=out_channels,
bn_use_global_stats=bn_use_global_stats,
dropout_rate=dropout_rate,
expand_ratio=expand_ratio))
in_channels = out_channels
self.features.add(stage)
self.features.add(PreResActivation(
in_channels=in_channels,
bn_use_global_stats=bn_use_global_stats))
self.features.add(nn.AvgPool2D(
pool_size=8,
strides=1))
self.output = nn.HybridSequential(prefix="")
self.output.add(nn.Flatten())
self.output.add(nn.Dense(
units=classes,
in_units=in_channels))
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
def get_xdensenet_cifar(classes,
blocks,
growth_rate,
bottleneck,
expand_ratio=2,
model_name=None,
pretrained=False,
ctx=cpu(),
root=os.path.join("~", ".mxnet", "models"),
**kwargs):
"""
Create X-DenseNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
growth_rate : int
Growth rate.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
expand_ratio : int, default 2
Ratio of expansion.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
assert (classes in [10, 100])
if bottleneck:
assert ((blocks - 4) % 6 == 0)
layers = [(blocks - 4) // 6] * 3
else:
assert ((blocks - 4) % 3 == 0)
layers = [(blocks - 4) // 3] * 3
init_block_channels = 2 * growth_rate
from functools import reduce
channels = reduce(
lambda xi, yi: xi + [reduce(
lambda xj, yj: xj + [xj[-1] + yj],
[growth_rate] * yi,
[xi[-1][-1] // 2])[1:]],
layers,
[[init_block_channels * 2]])[1:]
net = CIFARXDenseNet(
channels=channels,
init_block_channels=init_block_channels,
classes=classes,
bottleneck=bottleneck,
expand_ratio=expand_ratio,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
net.load_parameters(
filename=get_model_file(
model_name=model_name,
local_model_store_dir_path=root),
ctx=ctx)
return net
def xdensenet40_2_k24_bc_cifar10(classes=10, **kwargs):
"""
X-DenseNet-BC-40-2 (k=24) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph
Theory,' https://arxiv.org/abs/1711.08757.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True,
model_name="xdensenet40_2_k24_bc_cifar10", **kwargs)
def xdensenet40_2_k24_bc_cifar100(classes=100, **kwargs):
"""
X-DenseNet-BC-40-2 (k=24) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph
Theory,' https://arxiv.org/abs/1711.08757.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True,
model_name="xdensenet40_2_k24_bc_cifar100", **kwargs)
def xdensenet40_2_k24_bc_svhn(classes=10, **kwargs):
"""
X-DenseNet-BC-40-2 (k=24) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph
Theory,' https://arxiv.org/abs/1711.08757.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True,
model_name="xdensenet40_2_k24_bc_svhn", **kwargs)
def xdensenet40_2_k36_bc_cifar10(classes=10, **kwargs):
"""
X-DenseNet-BC-40-2 (k=36) model for CIFAR-10 from 'Deep Expander Networks: Efficient Deep Networks from Graph
Theory,' https://arxiv.org/abs/1711.08757.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,
model_name="xdensenet40_2_k36_bc_cifar10", **kwargs)
def xdensenet40_2_k36_bc_cifar100(classes=100, **kwargs):
"""
X-DenseNet-BC-40-2 (k=36) model for CIFAR-100 from 'Deep Expander Networks: Efficient Deep Networks from Graph
Theory,' https://arxiv.org/abs/1711.08757.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,
model_name="xdensenet40_2_k36_bc_cifar100", **kwargs)
def xdensenet40_2_k36_bc_svhn(classes=10, **kwargs):
"""
X-DenseNet-BC-40-2 (k=36) model for SVHN from 'Deep Expander Networks: Efficient Deep Networks from Graph
Theory,' https://arxiv.org/abs/1711.08757.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_xdensenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,
model_name="xdensenet40_2_k36_bc_svhn", **kwargs)
def _test():
import numpy as np
import mxnet as mx
pretrained = False
models = [
(xdensenet40_2_k24_bc_cifar10, 10),
(xdensenet40_2_k24_bc_cifar100, 100),
(xdensenet40_2_k24_bc_svhn, 10),
(xdensenet40_2_k36_bc_cifar10, 10),
(xdensenet40_2_k36_bc_cifar100, 100),
(xdensenet40_2_k36_bc_svhn, 10),
]
for model, classes in models:
net = model(pretrained=pretrained)
ctx = mx.cpu()
if not pretrained:
net.initialize(ctx=ctx)
# net.hybridize()
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != xdensenet40_2_k24_bc_cifar10 or weight_count == 690346)
assert (model != xdensenet40_2_k24_bc_cifar100 or weight_count == 714196)
assert (model != xdensenet40_2_k24_bc_svhn or weight_count == 690346)
assert (model != xdensenet40_2_k36_bc_cifar10 or weight_count == 1542682)
assert (model != xdensenet40_2_k36_bc_cifar100 or weight_count == 1578412)
assert (model != xdensenet40_2_k36_bc_svhn or weight_count == 1542682)
x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
y = net(x)
assert (y.shape == (1, classes))
if __name__ == "__main__":
_test()
| 14,171 | 35.245524 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ntsnet_cub.py | """
NTS-Net for CUB-200-2011, implemented in Gluon.
Original paper: 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287.
"""
__all__ = ['NTSNet', 'ntsnet_cub']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3
from .resnet import resnet50b
def hard_nms(cdds,
top_n=10,
iou_thresh=0.25):
"""
Hard Non-Maximum Suppression.
Parameters:
----------
cdds : np.array
Borders.
top_n : int, default 10
Number of top-K informative regions.
iou_thresh : float, default 0.25
IoU threshold.
Returns:
-------
np.array
Filtered borders.
"""
assert (type(cdds) == np.ndarray)
assert (len(cdds.shape) == 2)
assert (cdds.shape[1] >= 5)
cdds = cdds.copy()
indices = np.argsort(cdds[:, 0])
cdds = cdds[indices]
cdd_results = []
res = cdds
while res.any():
cdd = res[-1]
cdd_results.append(cdd)
if len(cdd_results) == top_n:
return np.array(cdd_results)
res = res[:-1]
start_max = np.maximum(res[:, 1:3], cdd[1:3])
end_min = np.minimum(res[:, 3:5], cdd[3:5])
lengths = end_min - start_max
intersec_map = lengths[:, 0] * lengths[:, 1]
intersec_map[np.logical_or(lengths[:, 0] < 0, lengths[:, 1] < 0)] = 0
iou_map_cur = intersec_map / ((res[:, 3] - res[:, 1]) * (res[:, 4] - res[:, 2]) + (cdd[3] - cdd[1]) * (
cdd[4] - cdd[2]) - intersec_map)
res = res[iou_map_cur < iou_thresh]
return np.array(cdd_results)
class NavigatorBranch(HybridBlock):
"""
Navigator branch block for Navigator unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
"""
def __init__(self,
in_channels,
out_channels,
strides,
**kwargs):
super(NavigatorBranch, self).__init__(**kwargs)
mid_channels = 128
with self.name_scope():
self.down_conv = conv3x3(
in_channels=in_channels,
out_channels=mid_channels,
strides=strides,
use_bias=True)
self.activ = nn.Activation("relu")
self.tidy_conv = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True)
self.flatten = nn.Flatten()
def hybrid_forward(self, F, x):
y = self.down_conv(x)
y = self.activ(y)
z = self.tidy_conv(y)
z = self.flatten(z)
return z, y
class NavigatorUnit(HybridBlock):
"""
Navigator init.
"""
def __init__(self,
**kwargs):
super(NavigatorUnit, self).__init__(**kwargs)
with self.name_scope():
self.branch1 = NavigatorBranch(
in_channels=2048,
out_channels=6,
strides=1)
self.branch2 = NavigatorBranch(
in_channels=128,
out_channels=6,
strides=2)
self.branch3 = NavigatorBranch(
in_channels=128,
out_channels=9,
strides=2)
def hybrid_forward(self, F, x):
t1, x = self.branch1(x)
t2, x = self.branch2(x)
t3, _ = self.branch3(x)
return F.concat(t1, t2, t3, dim=1)
class NTSNet(HybridBlock):
"""
NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287.
Parameters:
----------
backbone : nn.Sequential
Feature extractor.
aux : bool, default False
Whether to output auxiliary results.
top_n : int, default 4
Number of extra top-K informative regions.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
backbone,
aux=False,
top_n=4,
in_channels=3,
in_size=(448, 448),
classes=200,
**kwargs):
super(NTSNet, self).__init__(**kwargs)
assert (in_channels > 0)
self.in_size = in_size
self.classes = classes
self.top_n = top_n
self.aux = aux
self.num_cat = 4
pad_side = 224
self.pad_width = (0, 0, 0, 0, pad_side, pad_side, pad_side, pad_side)
_, edge_anchors, _ = self._generate_default_anchor_maps()
self.edge_anchors = (edge_anchors + 224).astype(np.int)
self.edge_anchors = np.concatenate(
(self.edge_anchors.copy(), np.arange(0, len(self.edge_anchors)).reshape(-1, 1)), axis=1)
with self.name_scope():
self.backbone = backbone
self.backbone_tail = nn.HybridSequential(prefix="")
self.backbone_tail.add(nn.GlobalAvgPool2D())
self.backbone_tail.add(nn.Flatten())
self.backbone_tail.add(nn.Dropout(rate=0.5))
self.backbone_classifier = nn.Dense(
units=classes,
in_units=(512 * 4))
self.navigator_unit = NavigatorUnit()
self.concat_net = nn.Dense(
units=classes,
in_units=(2048 * (self.num_cat + 1)))
if self.aux:
self.partcls_net = nn.Dense(
units=classes,
in_units=(512 * 4))
def hybrid_forward(self, F, x):
raw_pre_features = self.backbone(x)
rpn_score = self.navigator_unit(raw_pre_features)
all_cdds = [np.concatenate((y.reshape(-1, 1), self.edge_anchors.copy()), axis=1)
for y in rpn_score.asnumpy()]
top_n_cdds = [hard_nms(y, top_n=self.top_n, iou_thresh=0.25) for y in all_cdds]
top_n_cdds = np.array(top_n_cdds)
top_n_index = top_n_cdds[:, :, -1].astype(np.int64)
top_n_index2 = mx.nd.array(np.array([np.repeat(np.arange(top_n_cdds.shape[0]), top_n_cdds.shape[1]),
top_n_index.flatten()]), dtype=np.int64)
top_n_prob = F.gather_nd(rpn_score, top_n_index2).reshape(x.shape[0], -1)
batch = x.shape[0]
part_imgs = mx.nd.zeros(shape=(batch, self.top_n, 3, 224, 224), ctx=x.context, dtype=x.dtype)
x_pad = F.pad(x, mode="constant", pad_width=self.pad_width, constant_value=0)
for i in range(batch):
for j in range(self.top_n):
y0, x0, y1, x1 = tuple(top_n_cdds[i][j, 1:5].astype(np.int64))
part_imgs[i:i + 1, j] = F.contrib.BilinearResize2D(
x_pad[i:i + 1, :, y0:y1, x0:x1],
height=224,
width=224)
part_imgs = part_imgs.reshape((batch * self.top_n, 3, 224, 224))
part_features = self.backbone_tail(self.backbone(part_imgs.detach()))
part_feature = part_features.reshape((batch, self.top_n, -1))
part_feature = part_feature[:, :self.num_cat, :]
part_feature = part_feature.reshape((batch, -1))
raw_features = self.backbone_tail(raw_pre_features.detach())
concat_out = F.concat(part_feature, raw_features, dim=1)
concat_logits = self.concat_net(concat_out)
if self.aux:
raw_logits = self.backbone_classifier(raw_features)
part_logits = self.partcls_net(part_features).reshape((batch, self.top_n, -1))
return concat_logits, raw_logits, part_logits, top_n_prob
else:
return concat_logits
@staticmethod
def _generate_default_anchor_maps(input_shape=(448, 448)):
"""
Generate default anchor maps.
Parameters:
----------
input_shape : tuple of 2 int
Input image size.
Returns:
-------
center_anchors : np.array
anchors * 4 (oy, ox, h, w).
edge_anchors : np.array
anchors * 4 (y0, x0, y1, x1).
anchor_area : np.array
anchors * 1 (area).
"""
anchor_scale = [2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]
anchor_aspect_ratio = [0.667, 1, 1.5]
anchors_setting = (
dict(layer="p3", stride=32, size=48, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio),
dict(layer="p4", stride=64, size=96, scale=anchor_scale, aspect_ratio=anchor_aspect_ratio),
dict(layer="p5", stride=128, size=192, scale=[1, anchor_scale[0], anchor_scale[1]],
aspect_ratio=anchor_aspect_ratio),
)
center_anchors = np.zeros((0, 4), dtype=np.float32)
edge_anchors = np.zeros((0, 4), dtype=np.float32)
anchor_areas = np.zeros((0,), dtype=np.float32)
input_shape = np.array(input_shape, dtype=int)
for anchor_info in anchors_setting:
stride = anchor_info["stride"]
size = anchor_info["size"]
scales = anchor_info["scale"]
aspect_ratios = anchor_info["aspect_ratio"]
output_map_shape = np.ceil(input_shape.astype(np.float32) / stride)
output_map_shape = output_map_shape.astype(np.int)
output_shape = tuple(output_map_shape) + (4, )
ostart = stride / 2.0
oy = np.arange(ostart, ostart + stride * output_shape[0], stride)
oy = oy.reshape(output_shape[0], 1)
ox = np.arange(ostart, ostart + stride * output_shape[1], stride)
ox = ox.reshape(1, output_shape[1])
center_anchor_map_template = np.zeros(output_shape, dtype=np.float32)
center_anchor_map_template[:, :, 0] = oy
center_anchor_map_template[:, :, 1] = ox
for anchor_scale in scales:
for anchor_aspect_ratio in aspect_ratios:
center_anchor_map = center_anchor_map_template.copy()
center_anchor_map[:, :, 2] = size * anchor_scale / float(anchor_aspect_ratio) ** 0.5
center_anchor_map[:, :, 3] = size * anchor_scale * float(anchor_aspect_ratio) ** 0.5
edge_anchor_map = np.concatenate(
(center_anchor_map[:, :, :2] - center_anchor_map[:, :, 2:4] / 2.0,
center_anchor_map[:, :, :2] + center_anchor_map[:, :, 2:4] / 2.0),
axis=-1)
anchor_area_map = center_anchor_map[:, :, 2] * center_anchor_map[:, :, 3]
center_anchors = np.concatenate((center_anchors, center_anchor_map.reshape(-1, 4)))
edge_anchors = np.concatenate((edge_anchors, edge_anchor_map.reshape(-1, 4)))
anchor_areas = np.concatenate((anchor_areas, anchor_area_map.reshape(-1)))
return center_anchors, edge_anchors, anchor_areas
def get_ntsnet(backbone,
               aux=False,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create NTS-Net model with specific parameters.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    aux : bool, default False
        Whether to output auxiliary results.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = NTSNet(backbone=backbone, aux=aux, **kwargs)
    if not pretrained:
        return net
    # Loading pretrained weights requires a valid model name to locate them.
    if (model_name is None) or (not model_name):
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    weights_file = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=root)
    net.load_parameters(
        filename=weights_file,
        ctx=ctx,
        ignore_extra=True)
    return net
def ntsnet_cub(pretrained_backbone=False, aux=True, **kwargs):
    """
    NTS-Net model from 'Learning to Navigate for Fine-grained Classification,' https://arxiv.org/abs/1809.00287.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Use a ResNet-50b feature extractor without its last `features` block.
    feature_extractor = resnet50b(pretrained=pretrained_backbone).features[:-1]
    return get_ntsnet(
        backbone=feature_extractor,
        aux=aux,
        model_name="ntsnet_cub",
        **kwargs)
def _test():
    import numpy as np
    import mxnet as mx

    aux = True
    pretrained = False

    for model in [ntsnet_cub]:
        net = model(pretrained=pretrained, aux=aux)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        # Count only trainable parameters with a concrete shape.
        weight_count = sum(
            np.prod(param.shape)
            for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        expected = 29033133 if aux else 28623333
        assert (model != ntsnet_cub) or (weight_count == expected)

        x = mx.nd.zeros((5, 3, 448, 448), ctx=ctx)
        out = net(x)
        y = out[0] if aux else out
        assert (y.shape[0] == x.shape[0]) and (y.shape[1] == 200)
| 14,467 | 33.695444 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/proxylessnas_cub.py | """
ProxylessNAS for CUB-200-2011, implemented in Gluon.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['proxylessnas_cpu_cub', 'proxylessnas_gpu_cub', 'proxylessnas_mobile_cub', 'proxylessnas_mobile14_cub']
from .proxylessnas import get_proxylessnas
def proxylessnas_cpu_cub(classes=200, **kwargs):
    """
    ProxylessNAS (CPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and
    Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="cpu",
        classes=classes,
        model_name="proxylessnas_cpu_cub",
        **kwargs)
def proxylessnas_gpu_cub(classes=200, **kwargs):
    """
    ProxylessNAS (GPU) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and
    Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="gpu",
        classes=classes,
        model_name="proxylessnas_gpu_cub",
        **kwargs)
def proxylessnas_mobile_cub(classes=200, **kwargs):
    """
    ProxylessNAS (Mobile) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target Task
    and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="mobile",
        classes=classes,
        model_name="proxylessnas_mobile_cub",
        **kwargs)
def proxylessnas_mobile14_cub(classes=200, **kwargs):
    """
    ProxylessNAS (Mobile-14) model for CUB-200-2011 from 'ProxylessNAS: Direct Neural Architecture Search on Target
    Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    classes : int, default 200
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="mobile14",
        classes=classes,
        model_name="proxylessnas_mobile14_cub",
        **kwargs)
def _test():
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per model variant (dict preserves
    # insertion order, so models are checked in the original order).
    expected_counts = {
        proxylessnas_cpu_cub: 3215248,
        proxylessnas_gpu_cub: 5736648,
        proxylessnas_mobile_cub: 3055712,
        proxylessnas_mobile14_cub: 5423168,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        x = mx.nd.zeros((14, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (14, 200))
| 4,484 | 33.767442 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibnresnet.py | """
IBN-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNResNet', 'ibn_resnet50', 'ibn_resnet101', 'ibn_resnet152']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, IBN
from .resnet import ResInitBlock
class IBNConvBlock(HybridBlock):
    """
    IBN-Net specific convolution block with BN/IBN normalization and ReLU activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization instead of plain BatchNorm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool, default True
        Whether activate the convolution block with ReLU.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_ibn=False,
                 bn_use_global_stats=False,
                 activate=True,
                 **kwargs):
        super(IBNConvBlock, self).__init__(**kwargs)
        self.activate = activate
        self.use_ibn = use_ibn
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
            # Exactly one normalization layer is created: IBN (half instance
            # norm, half batch norm) or standard BatchNorm.
            if self.use_ibn:
                self.ibn = IBN(channels=out_channels)
            else:
                self.bn = nn.BatchNorm(
                    in_channels=out_channels,
                    use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        # conv -> (IBN | BN) -> optional ReLU.
        x = self.conv(x)
        if self.use_ibn:
            x = self.ibn(x)
        else:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def ibn_conv1x1_block(in_channels,
                      out_channels,
                      strides=1,
                      groups=1,
                      use_bias=False,
                      use_ibn=False,
                      bn_use_global_stats=False,
                      activate=True):
    """
    1x1 version of the IBN-Net specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_ibn : bool, default False
        Whether use Instance-Batch Normalization.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    activate : bool, default True
        Whether activate the convolution block.
    """
    # A pointwise convolution never needs padding.
    return IBNConvBlock(
        kernel_size=1,
        padding=0,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        groups=groups,
        use_bias=use_bias,
        use_ibn=use_ibn,
        bn_use_global_stats=bn_use_global_stats,
        activate=activate)
class IBNResBottleneck(HybridBlock):
    """
    IBN-ResNet bottleneck block for residual path in IBN-ResNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 conv1_ibn,
                 bn_use_global_stats,
                 **kwargs):
        super(IBNResBottleneck, self).__init__(**kwargs)
        # Standard ResNet bottleneck ratio: internal width is out_channels / 4.
        mid_channels = out_channels // 4
        with self.name_scope():
            # 1x1 reduce; optionally with IBN normalization.
            self.conv1 = ibn_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_ibn=conv1_ibn,
                bn_use_global_stats=bn_use_global_stats)
            # 3x3 conv carries the spatial stride.
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            # 1x1 expand, no activation (ReLU is applied after the residual add
            # in the enclosing unit).
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class IBNResUnit(HybridBlock):
    """
    IBN-ResNet unit: bottleneck residual body plus identity/projection shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_ibn : bool
        Whether to use IBN normalization in the first convolution layer of the block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 conv1_ibn,
                 bn_use_global_stats,
                 **kwargs):
        super(IBNResUnit, self).__init__(**kwargs)
        # A 1x1 projection is needed whenever shape of the identity branch
        # would not match the body output (channel or spatial change).
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = IBNResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                conv1_ibn=conv1_ibn,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                # Projection shortcut: 1x1 conv + BN, no activation.
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Post-activation residual: ReLU after the addition.
        x = x + identity
        x = self.activ(x)
        return x
class IBNResNet(HybridBlock):
    """
    IBN-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(IBNResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except
                        # the first one.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # IBN is used everywhere but in the widest (last) stage.
                        conv1_ibn = (out_channels < 2048)
                        stage.add(IBNResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            conv1_ibn=conv1_ibn,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Fixed 7x7 pooling assumes a 224x224 input.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_ibnresnet(blocks,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create IBN-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50, 101 or 152).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Units per stage for each supported depth.
    layers_table = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in layers_table:
        raise ValueError("Unsupported IBN-ResNet with number of blocks: {}".format(blocks))
    layers = layers_table[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = IBNResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(
            filename=weights_file,
            ctx=ctx)

    return net
def ibn_resnet50(**kwargs):
    """
    IBN-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(model_name="ibn_resnet50", blocks=50, **kwargs)
def ibn_resnet101(**kwargs):
    """
    IBN-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(model_name="ibn_resnet101", blocks=101, **kwargs)
def ibn_resnet152(**kwargs):
    """
    IBN-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_ibnresnet(model_name="ibn_resnet152", blocks=152, **kwargs)
def _test():
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per depth (insertion order preserved).
    expected_counts = {
        ibn_resnet50: 25557032,
        ibn_resnet101: 44549160,
        ibn_resnet152: 60192808,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
| 14,932 | 31.81978 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/common.py | """
Common routines for models in Gluon.
"""
__all__ = ['round_channels', 'BreakBlock', 'get_activation_layer', 'ReLU6', 'PReLU2', 'HSigmoid', 'HSwish', 'Softmax',
'SelectableDense', 'BatchNormExtra', 'DenseBlock', 'ConvBlock1d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3',
'ConvBlock', 'conv1x1_block', 'conv3x3_block', 'conv5x5_block', 'conv7x7_block', 'dwconv_block',
'dwconv3x3_block', 'dwconv5x5_block', 'dwsconv3x3_block', 'PreConvBlock', 'pre_conv1x1_block',
'pre_conv3x3_block', 'DeconvBlock', 'NormActivation', 'InterpolationBlock', 'ChannelShuffle',
'ChannelShuffle2', 'SEBlock', 'SABlock', 'SAConvBlock', 'saconv3x3_block', 'DucBlock', 'split', 'IBN',
'DualPathSequential', 'ParametricSequential', 'Concurrent', 'SequentialConcurrent', 'ParametricConcurrent',
'Hourglass', 'SesquialteralHourglass', 'MultiOutputSequential', 'ParallelConcurent',
'DualPathParallelConcurent', 'HeatmapMaxDetBlock']
import math
from inspect import isfunction
import mxnet as mx
from mxnet.gluon import nn, HybridBlock
def round_channels(channels,
                   divisor=8):
    """
    Round weighted channel number (make divisible operation).

    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.

    Returns:
    -------
    int
        Weighted number of channels.
    """
    # Round half-up to the nearest multiple of `divisor`, never below it.
    quotient = int(channels + divisor / 2.0) // divisor
    result = quotient * divisor
    if result < divisor:
        result = divisor
    # Never round down by more than 10%.
    if result < 0.9 * channels:
        result += divisor
    return result
class BreakBlock(HybridBlock):
    """
    Break connection block for hourglass.

    Returning None deliberately severs the skip connection in which this block
    is placed.
    """
    def __init__(self, prefix=None, params=None):
        super(BreakBlock, self).__init__(prefix=prefix, params=params)

    def hybrid_forward(self, F, x):
        # Discard the input: downstream code treats None as "no skip branch".
        return None

    def __repr__(self):
        return '{name}()'.format(name=self.__class__.__name__)
class ReLU6(HybridBlock):
    """
    ReLU6 activation layer: clamps the input into the [0, 6] range.
    """
    def __init__(self, **kwargs):
        super(ReLU6, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        return F.clip(x, a_min=0.0, a_max=6.0, name="relu6")

    def __repr__(self):
        return "{name}()".format(name=self.__class__.__name__)
class PReLU2(HybridBlock):
    """
    Parametric leaky version of a Rectified Linear Unit (with wide alpha).

    Parameters:
    ----------
    in_channels : int, default 1
        Number of input channels (size of the learnable `alpha` vector).
    alpha_initializer : Initializer, default Constant(0.25)
        Initializer for the learnable `alpha` (negative-slope) parameters.
    """
    def __init__(self,
                 in_channels=1,
                 alpha_initializer=mx.init.Constant(0.25),
                 **kwargs):
        super(PReLU2, self).__init__(**kwargs)
        with self.name_scope():
            # One learnable slope per channel.
            self.alpha = self.params.get("alpha", shape=(in_channels,), init=alpha_initializer)

    def hybrid_forward(self, F, x, alpha):
        # act_type="prelu" uses `gamma` (alpha) as the per-channel slope for
        # negative inputs.
        return F.LeakyReLU(x, gamma=alpha, act_type="prelu", name="fwd")

    def __repr__(self):
        s = '{name}(in_channels={in_channels})'
        return s.format(
            name=self.__class__.__name__,
            in_channels=self.alpha.shape[0])
class HSigmoid(HybridBlock):
    """
    Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    """
    def __init__(self, **kwargs):
        super(HSigmoid, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        # relu6(x + 3) / 6
        shifted = F.clip(x + 3.0, 0.0, 6.0, name="relu6")
        return shifted / 6.0

    def __repr__(self):
        return "{name}()".format(name=self.__class__.__name__)
class HSwish(HybridBlock):
    """
    H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    """
    def __init__(self, **kwargs):
        super(HSwish, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        # x * relu6(x + 3) / 6
        gate = F.clip(x + 3.0, 0.0, 6.0, name="relu6")
        return x * gate / 6.0

    def __repr__(self):
        return "{name}()".format(name=self.__class__.__name__)
class Softmax(HybridBlock):
    """
    Softmax activation function.

    Parameters:
    ----------
    axis : int, default 1
        Axis along which to do softmax.
    """
    def __init__(self, axis=1, **kwargs):
        super(Softmax, self).__init__(**kwargs)
        self.axis = axis

    def hybrid_forward(self, F, x):
        # Equivalent to x.softmax(axis=...).
        return F.softmax(x, axis=self.axis)

    def __repr__(self):
        return "{name}()".format(name=self.__class__.__name__)
def get_activation_layer(activation):
    """
    Create activation layer from string/function.

    Parameters:
    ----------
    activation : function, or str, or HybridBlock
        Activation function or name of activation function.

    Returns:
    -------
    HybridBlock
        Activation layer.
    """
    assert (activation is not None)
    # A factory callable: instantiate it.
    if isfunction(activation):
        return activation()
    # A name: resolve custom activations first, then fall back to the stock
    # mxnet Activation layer.
    if isinstance(activation, str):
        custom_activations = {
            "relu6": ReLU6,
            "swish": nn.Swish,
            "hswish": HSwish,
            "hsigmoid": HSigmoid,
        }
        if activation in custom_activations:
            return custom_activations[activation]()
        return nn.Activation(activation)
    # Already a constructed layer: pass it through unchanged.
    assert (isinstance(activation, HybridBlock))
    return activation
class SelectableDense(HybridBlock):
    """
    Selectable dense layer: holds `num_options` independent weight matrices and
    applies, per sample, the one chosen by an index argument.

    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dtype : str or np.dtype, default 'float32'
        Data type of output embeddings.
    weight_initializer : str or `Initializer`
        Initializer for the `kernel` weights matrix.
    bias_initializer: str or `Initializer`
        Initializer for the bias vector.
    num_options : int, default 1
        Number of selectable options.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias=False,
                 dtype="float32",
                 weight_initializer=None,
                 bias_initializer="zeros",
                 num_options=1,
                 **kwargs):
        super(SelectableDense, self).__init__(**kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_bias = use_bias
        self.num_options = num_options
        with self.name_scope():
            # One (out_channels x in_channels) matrix per option.
            self.weight = self.params.get(
                "weight",
                shape=(num_options, out_channels, in_channels),
                init=weight_initializer,
                dtype=dtype,
                allow_deferred_init=True)
            if use_bias:
                self.bias = self.params.get(
                    "bias",
                    shape=(num_options, out_channels),
                    init=bias_initializer,
                    dtype=dtype,
                    allow_deferred_init=True)
            else:
                self.bias = None

    def hybrid_forward(self, F, x, indices, weight, bias=None):
        # Gather the per-sample weight matrices, then apply them as a batched
        # matrix-vector product.
        weight = F.take(weight, indices=indices, axis=0)
        x = x.expand_dims(axis=-1)
        x = F.batch_dot(weight, x)
        x = x.squeeze(axis=-1)
        if self.use_bias:
            bias = F.take(bias, indices=indices, axis=0)
            x += bias
        return x

    def __repr__(self):
        s = "{name}({layout}, {num_options})"
        shape = self.weight.shape
        return s.format(name=self.__class__.__name__,
                        layout="{0} -> {1}".format(shape[1] if shape[1] else None, shape[0]),
                        num_options=self.num_options)
class BatchNormExtra(nn.BatchNorm):
    """
    Batch normalization layer that tolerates the extra `cudnn_off` parameter.

    The stock `nn.BatchNorm` constructor rejects `cudnn_off`, so it is stripped
    from the keyword arguments before initialization and re-injected into the
    operator kwargs afterwards, only when the caller actually supplied it.
    """
    def __init__(self, **kwargs):
        sentinel = object()
        cudnn_off = kwargs.pop("cudnn_off", sentinel)
        super(BatchNormExtra, self).__init__(**kwargs)
        if cudnn_off is not sentinel:
            self._kwargs["cudnn_off"] = cudnn_off
class DenseBlock(HybridBlock):
    """
    Standard dense block with Batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input features.
    out_channels : int
        Number of output features.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(DenseBlock, self).__init__(**kwargs)
        # Activation/BN are optional; flags are checked at forward time.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.name_scope():
            self.fc = nn.Dense(
                units=out_channels,
                use_bias=use_bias,
                in_units=in_channels)
            if self.use_bn:
                # BatchNormExtra accepts the extra `cudnn_off` parameter.
                self.bn = BatchNormExtra(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats,
                    cudnn_off=bn_cudnn_off)
            if self.activate:
                self.activ = get_activation_layer(activation)

    def hybrid_forward(self, F, x):
        # fc -> optional BN -> optional activation.
        x = self.fc(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
class ConvBlock1d(HybridBlock):
    """
    Standard 1D convolution block with Batch normalization and activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    padding : int
        Padding value for convolution layer.
    dilation : int
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(ConvBlock1d, self).__init__(**kwargs)
        # Activation/BN are optional; flags are checked at forward time.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.name_scope():
            self.conv = nn.Conv1D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
            if self.use_bn:
                # BatchNormExtra accepts the extra `cudnn_off` parameter.
                self.bn = BatchNormExtra(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats,
                    cudnn_off=bn_cudnn_off)
            if self.activate:
                self.activ = get_activation_layer(activation)

    def hybrid_forward(self, F, x):
        # conv -> optional BN -> optional activation.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1(in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False):
    """
    Convolution 1x1 (pointwise) layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2D(
        in_channels=in_channels,
        channels=out_channels,
        kernel_size=1,
        strides=strides,
        groups=groups,
        use_bias=use_bias)
def conv3x3(in_channels,
            out_channels,
            strides=1,
            padding=1,
            dilation=1,
            groups=1,
            use_bias=False):
    """
    Convolution 3x3 layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    """
    return nn.Conv2D(
        in_channels=in_channels,
        channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias)
def depthwise_conv3x3(channels,
                      strides=1,
                      padding=1,
                      dilation=1,
                      use_bias=False):
    """
    Depthwise convolution 3x3 layer.

    Parameters:
    ----------
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 1
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    use_bias : bool, default False
        Whether a bias vector is used.
    """
    # groups == channels gives each channel its own 3x3 filter (depthwise).
    return nn.Conv2D(
        in_channels=channels,
        channels=channels,
        groups=channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias)
class ConvBlock(HybridBlock):
    """
    Standard convolution block with batch normalization and activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(ConvBlock, self).__init__(**kwargs)
        # Resolve the optional-stage flags once; hybrid_forward only tests them.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
            if self.use_bn:
                # BatchNormExtra: project-local BatchNorm variant that accepts a cudnn_off flag.
                self.bn = BatchNormExtra(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats,
                    cudnn_off=bn_cudnn_off)
            if self.activate:
                self.activ = get_activation_layer(activation)
    def hybrid_forward(self, F, x):
        # Pipeline: conv -> (optional) batch norm -> (optional) activation.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
def conv1x1_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=0,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_epsilon=1e-5,
                  bn_use_global_stats=False,
                  bn_cudnn_off=False,
                  activation=(lambda: nn.Activation("relu")),
                  **kwargs):
    """
    1x1 variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 0
        Convolution padding.
    groups : int, default 1
        Number of channel groups.
    use_bias : bool, default False
        Whether a bias vector is used.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # Fix the kernel size at 1 and forward everything else unchanged.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=padding,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return ConvBlock(**fixed, **kwargs)
def conv3x3_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  bn_epsilon=1e-5,
                  bn_use_global_stats=False,
                  bn_cudnn_off=False,
                  activation=(lambda: nn.Activation("relu")),
                  **kwargs):
    """
    3x3 variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 1
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    groups : int, default 1
        Number of channel groups.
    use_bias : bool, default False
        Whether a bias vector is used.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # Fix the kernel size at 3 and forward everything else unchanged.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return ConvBlock(**fixed, **kwargs)
def conv5x5_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  bn_epsilon=1e-5,
                  bn_use_global_stats=False,
                  bn_cudnn_off=False,
                  activation=(lambda: nn.Activation("relu")),
                  **kwargs):
    """
    5x5 variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 2
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    groups : int, default 1
        Number of channel groups.
    use_bias : bool, default False
        Whether a bias vector is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # Fix the kernel size at 5; extra options (e.g. use_bn) pass through **kwargs.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return ConvBlock(**fixed, **kwargs)
def conv7x7_block(in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  use_bn=True,
                  bn_use_global_stats=False,
                  bn_cudnn_off=False,
                  activation=(lambda: nn.Activation("relu")),
                  **kwargs):
    """
    7x7 variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 3
        Convolution padding.
    use_bias : bool, default False
        Whether a bias vector is used.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # Fix the kernel size at 7 and forward everything else unchanged.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return ConvBlock(**fixed, **kwargs)
def dwconv_block(in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
    """
    Depthwise variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Convolution strides.
    padding : int or tuple/list of 2 int
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    use_bias : bool, default False
        Whether a bias vector is used.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # groups=out_channels turns the ConvBlock convolution into a depthwise one.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=out_channels,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return ConvBlock(**fixed, **kwargs)
def dwconv3x3_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    bn_epsilon=1e-5,
                    bn_use_global_stats=False,
                    bn_cudnn_off=False,
                    activation=(lambda: nn.Activation("relu")),
                    **kwargs):
    """
    3x3 depthwise variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 1
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    use_bias : bool, default False
        Whether a bias vector is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # Delegate to dwconv_block with a fixed 3x3 kernel.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return dwconv_block(**fixed, **kwargs)
def dwconv5x5_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=2,
                    dilation=1,
                    use_bias=False,
                    bn_epsilon=1e-5,
                    bn_use_global_stats=False,
                    bn_cudnn_off=False,
                    activation=(lambda: nn.Activation("relu")),
                    **kwargs):
    """
    5x5 depthwise variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 2
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    use_bias : bool, default False
        Whether a bias vector is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or its name.
    """
    # Delegate to dwconv_block with a fixed 5x5 kernel.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        activation=activation)
    return dwconv_block(**fixed, **kwargs)
class DwsConvBlock(HybridBlock):
    """
    Depthwise separable convolution block with BatchNorms and activations at each convolution layers.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dw_use_bn : bool, default True
        Whether to use BatchNorm layer (depthwise convolution block).
    pw_use_bn : bool, default True
        Whether to use BatchNorm layer (pointwise convolution block).
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the depthwise convolution block.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation function after the pointwise convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 use_bias=False,
                 dw_use_bn=True,
                 pw_use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 dw_activation=(lambda: nn.Activation("relu")),
                 pw_activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(DwsConvBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Depthwise stage: per-channel spatial filtering (channel count unchanged).
            self.dw_conv = dwconv_block(
                in_channels=in_channels,
                out_channels=in_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                use_bias=use_bias,
                use_bn=dw_use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=dw_activation)
            # Pointwise stage: 1x1 convolution that mixes channels and sets out_channels.
            self.pw_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=pw_use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=pw_activation)
    def hybrid_forward(self, F, x):
        # Depthwise then pointwise, each with its own BN/activation.
        x = self.dw_conv(x)
        x = self.pw_conv(x)
        return x
def dwsconv3x3_block(in_channels,
                     out_channels,
                     strides=1,
                     padding=1,
                     dilation=1,
                     use_bias=False,
                     bn_epsilon=1e-5,
                     bn_use_global_stats=False,
                     bn_cudnn_off=False,
                     dw_activation=(lambda: nn.Activation("relu")),
                     pw_activation=(lambda: nn.Activation("relu")),
                     **kwargs):
    """
    3x3 depthwise separable variant of the standard convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 1
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    use_bias : bool, default False
        Whether a bias vector is used.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable the CUDNN batch normalization operator.
    dw_activation : function or str or None, default nn.Activation('relu')
        Activation after the depthwise stage.
    pw_activation : function or str or None, default nn.Activation('relu')
        Activation after the pointwise stage.
    """
    # Delegate to DwsConvBlock with a fixed 3x3 depthwise kernel.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        use_bias=use_bias,
        bn_epsilon=bn_epsilon,
        bn_use_global_stats=bn_use_global_stats,
        bn_cudnn_off=bn_cudnn_off,
        dw_activation=dw_activation,
        pw_activation=pw_activation)
    return DwsConvBlock(**fixed, **kwargs)
class PreConvBlock(HybridBlock):
    """
    Convolution block with Batch normalization and ReLU pre-activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    return_preact : bool, default False
        Whether return pre-activation. It's used by PreResNet.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_use_global_stats=False,
                 return_preact=False,
                 activate=True,
                 **kwargs):
        super(PreConvBlock, self).__init__(**kwargs)
        self.return_preact = return_preact
        self.activate = activate
        self.use_bn = use_bn
        with self.name_scope():
            # Pre-activation ordering: BN -> ReLU -> conv.
            if self.use_bn:
                self.bn = nn.BatchNorm(
                    in_channels=in_channels,
                    use_global_stats=bn_use_global_stats)
            if self.activate:
                self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
    def hybrid_forward(self, F, x):
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        if self.return_preact:
            # Snapshot the normalized/activated tensor before the convolution;
            # PreResNet feeds it to the parallel identity/projection branch.
            x_pre_activ = x
        x = self.conv(x)
        if self.return_preact:
            return x, x_pre_activ
        else:
            return x
def pre_conv1x1_block(in_channels,
                      out_channels,
                      strides=1,
                      use_bias=False,
                      use_bn=True,
                      bn_use_global_stats=False,
                      return_preact=False,
                      activate=True):
    """
    1x1 variant of the pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    use_bias : bool, default False
        Whether a bias vector is used.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    return_preact : bool, default False
        Whether the pre-activation tensor is also returned.
    activate : bool, default True
        Whether the block applies its activation.
    """
    # Fix the kernel at 1x1 with zero padding and forward the rest unchanged.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        activate=activate)
    return PreConvBlock(**fixed)
def pre_conv3x3_block(in_channels,
                      out_channels,
                      strides=1,
                      padding=1,
                      dilation=1,
                      groups=1,
                      use_bias=False,
                      use_bn=True,
                      bn_use_global_stats=False,
                      return_preact=False,
                      activate=True):
    """
    3x3 variant of the pre-activated convolution block.

    Parameters:
    ----------
    in_channels : int
        Input channel count.
    out_channels : int
        Output channel count.
    strides : int or tuple/list of 2 int, default 1
        Convolution strides.
    padding : int or tuple/list of 2 int, default 1
        Convolution padding.
    dilation : int or tuple/list of 2 int, default 1
        Convolution dilation.
    groups : int, default 1
        Number of channel groups.
    use_bias : bool, default False
        Whether a bias vector is used.
    use_bn : bool, default True
        Whether a BatchNorm layer is used.
    bn_use_global_stats : bool, default False
        Whether global moving statistics are used in BatchNorm layers.
    return_preact : bool, default False
        Whether the pre-activation tensor is also returned.
    activate : bool, default True
        Whether the block applies its activation.
    """
    # Fix the kernel at 3x3 and forward the rest unchanged.
    fixed = dict(
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        bn_use_global_stats=bn_use_global_stats,
        return_preact=return_preact,
        activate=activate)
    return PreConvBlock(**fixed)
class DeconvBlock(HybridBlock):
    """
    Deconvolution block with batch normalization and activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the deconvolution.
    padding : int or tuple/list of 2 int
        Padding value for deconvolution layer.
    out_padding : int or tuple/list of 2 int
        Output padding value for deconvolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for deconvolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 out_padding=0,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(DeconvBlock, self).__init__(**kwargs)
        # Resolve the optional-stage flags once; hybrid_forward only tests them.
        self.activate = (activation is not None)
        self.use_bn = use_bn
        with self.name_scope():
            # out_padding is forwarded as Conv2DTranspose's output_padding argument.
            self.conv = nn.Conv2DTranspose(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                output_padding=out_padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
            if self.use_bn:
                # BatchNormExtra: project-local BatchNorm variant that accepts a cudnn_off flag.
                self.bn = BatchNormExtra(
                    in_channels=out_channels,
                    epsilon=bn_epsilon,
                    use_global_stats=bn_use_global_stats,
                    cudnn_off=bn_cudnn_off)
            if self.activate:
                self.activ = get_activation_layer(activation)
    def hybrid_forward(self, F, x):
        # Pipeline: transposed conv -> (optional) batch norm -> (optional) activation.
        x = self.conv(x)
        if self.use_bn:
            x = self.bn(x)
        if self.activate:
            x = self.activ(x)
        return x
class NormActivation(HybridBlock):
    """
    Activation block with preliminary batch normalization. It's used by itself as the final block in PreResNet.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    """
    def __init__(self,
                 in_channels,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 **kwargs):
        super(NormActivation, self).__init__(**kwargs)
        with self.name_scope():
            # BatchNormExtra: project-local BatchNorm variant that accepts a cudnn_off flag.
            self.bn = BatchNormExtra(
                in_channels=in_channels,
                epsilon=bn_epsilon,
                use_global_stats=bn_use_global_stats,
                cudnn_off=bn_cudnn_off)
            self.activ = get_activation_layer(activation)
    def hybrid_forward(self, F, x):
        # Batch norm first, then activation; no convolution in this block.
        x = self.bn(x)
        x = self.activ(x)
        return x
class InterpolationBlock(HybridBlock):
    """
    Interpolation block.
    Parameters:
    ----------
    scale_factor : int
        Multiplier for spatial size.
    out_size : tuple of 2 int, default None
        Spatial size of the output tensor for the bilinear interpolation operation.
    bilinear : bool, default True
        Whether to use bilinear interpolation.
    up : bool, default True
        Whether to upsample or downsample.
    """
    def __init__(self,
                 scale_factor,
                 out_size=None,
                 bilinear=True,
                 up=True,
                 **kwargs):
        super(InterpolationBlock, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        self.out_size = out_size
        self.bilinear = bilinear
        self.up = up
    def hybrid_forward(self, F, x, size=None):
        # Bilinear resize is used either when requested or when an explicit
        # target size is supplied (nearest UpSampling only handles scale factors).
        if self.bilinear or (size is not None):
            out_size = self.calc_out_size(x) if size is None else size
            return F.contrib.BilinearResize2D(x, height=out_size[0], width=out_size[1])
        else:
            return F.UpSampling(x, scale=self.scale_factor, sample_type="nearest")
    def calc_out_size(self, x):
        # NOTE(review): reads x.shape, so this path only works in imperative
        # (NDArray) mode — confirm callers do not rely on it after hybridize().
        if self.out_size is not None:
            return self.out_size
        if self.up:
            return tuple(s * self.scale_factor for s in x.shape[2:])
        else:
            return tuple(s // self.scale_factor for s in x.shape[2:])
    def __repr__(self):
        s = '{name}(scale_factor={scale_factor}, out_size={out_size}, bilinear={bilinear}, up={up})'
        return s.format(
            name=self.__class__.__name__,
            scale_factor=self.scale_factor,
            out_size=self.out_size,
            bilinear=self.bilinear,
            up=self.up)
    def calc_flops(self, x):
        # Per-element cost estimate: 9 ops/element for bilinear, 4 for nearest.
        assert (x.shape[0] == 1)
        if self.bilinear:
            num_flops = 9 * x.size
        else:
            num_flops = 4 * x.size
        num_macs = 0
        return num_flops, num_macs
def channel_shuffle(x,
                    groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.

    Parameters:
    ----------
    x : Symbol or NDArray
        Input tensor.
    groups : int
        Number of groups.

    Returns:
    -------
    Symbol or NDArray
        Resulted tensor.
    """
    # Split the channel axis into (groups, channels_per_group), transpose those
    # two axes, then merge them back. MXNet reshape codes: 0 keeps a dim, -1
    # infers it, -4 splits one axis into two, -3 merges two axes, -2 copies
    # the remaining dims.
    x = x.reshape((0, -4, groups, -1, -2))
    x = x.swapaxes(1, 2)
    return x.reshape((0, -3, -2))
class ChannelShuffle(HybridBlock):
    """
    Channel shuffle layer. A thin wrapper over the `channel_shuffle` operation
    that remembers the group count.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups,
                 **kwargs):
        super(ChannelShuffle, self).__init__(**kwargs)
        # The shuffle is only well defined when channels split evenly into groups.
        assert (channels % groups == 0)
        self.groups = groups

    def hybrid_forward(self, F, x):
        return channel_shuffle(x, self.groups)

    def __repr__(self):
        return "{name}(groups={groups})".format(
            name=self.__class__.__name__,
            groups=self.groups)
def channel_shuffle2(x,
                     channels_per_group):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083. The alternative version.

    Parameters:
    ----------
    x : Symbol or NDArray
        Input tensor.
    channels_per_group : int
        Number of channels per group.

    Returns:
    -------
    Symbol or NDArray
        Resulted tensor.
    """
    # Same trick as channel_shuffle, but the channel axis is split as
    # (channels_per_group, groups) instead of (groups, channels_per_group).
    x = x.reshape((0, -4, channels_per_group, -1, -2))
    x = x.swapaxes(1, 2)
    return x.reshape((0, -3, -2))
class ChannelShuffle2(HybridBlock):
    """
    Channel shuffle layer (alternative version). A thin wrapper over the
    `channel_shuffle2` operation; stores channels-per-group instead of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups,
                 **kwargs):
        super(ChannelShuffle2, self).__init__(**kwargs)
        # The shuffle is only well defined when channels split evenly into groups.
        assert (channels % groups == 0)
        self.channels_per_group = channels // groups

    def hybrid_forward(self, F, x):
        return channel_shuffle2(x, self.channels_per_group)
class SEBlock(HybridBlock):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    mid_channels : int or None, default None
        Number of middle channels.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    mid_activation : function, or str, or HybridBlock, default 'relu'
        Activation function after the first convolution.
    out_activation : function, or str, or HybridBlock, default 'sigmoid'
        Activation function after the last convolution.
    """
    def __init__(self,
                 channels,
                 reduction=16,
                 mid_channels=None,
                 round_mid=False,
                 use_conv=True,
                 mid_activation=(lambda: nn.Activation("relu")),
                 out_activation=(lambda: nn.Activation("sigmoid")),
                 **kwargs):
        super(SEBlock, self).__init__(**kwargs)
        self.use_conv = use_conv
        if mid_channels is None:
            mid_channels = channels // reduction if not round_mid else round_channels(float(channels) / reduction)
        with self.name_scope():
            # Bottleneck (channels -> mid -> channels), realized either as 1x1
            # convolutions or as Dense layers depending on use_conv.
            if use_conv:
                self.conv1 = conv1x1(
                    in_channels=channels,
                    out_channels=mid_channels,
                    use_bias=True)
            else:
                self.fc1 = nn.Dense(
                    in_units=channels,
                    units=mid_channels)
            self.activ = get_activation_layer(mid_activation)
            if use_conv:
                self.conv2 = conv1x1(
                    in_channels=mid_channels,
                    out_channels=channels,
                    use_bias=True)
            else:
                self.fc2 = nn.Dense(
                    in_units=mid_channels,
                    units=channels)
            self.sigmoid = get_activation_layer(out_activation)
    def hybrid_forward(self, F, x):
        # Squeeze: global average pooling down to a 1x1 channel descriptor.
        w = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        if not self.use_conv:
            w = F.Flatten(w)
        # Excitation: bottleneck MLP ending in a gating nonlinearity.
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = self.sigmoid(w)
        if not self.use_conv:
            # Restore the (N, C, 1, 1) layout dropped by Flatten for broadcasting.
            w = w.expand_dims(2).expand_dims(3)
        # Scale: channel-wise reweighting of the input feature map.
        x = F.broadcast_mul(x, w)
        return x
class SABlock(HybridBlock):
    """
    Split-Attention block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.
    Parameters:
    ----------
    out_channels : int
        Number of output channels.
    groups : int
        Number of channel groups (cardinality, without radix).
    radix : int
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to use convolutional layers instead of fully-connected ones.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 out_channels,
                 groups,
                 radix,
                 reduction=4,
                 min_channels=32,
                 use_conv=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(SABlock, self).__init__(**kwargs)
        self.groups = groups
        self.radix = radix
        self.use_conv = use_conv
        # The block consumes radix stacked splits of out_channels each.
        in_channels = out_channels * radix
        mid_channels = max(in_channels // reduction, min_channels)
        with self.name_scope():
            if use_conv:
                self.conv1 = conv1x1(
                    in_channels=out_channels,
                    out_channels=mid_channels,
                    use_bias=True)
            else:
                self.fc1 = nn.Dense(
                    in_units=out_channels,
                    units=mid_channels)
            # BatchNormExtra: project-local BatchNorm variant that accepts a cudnn_off flag.
            self.bn = BatchNormExtra(
                in_channels=mid_channels,
                epsilon=bn_epsilon,
                use_global_stats=bn_use_global_stats,
                cudnn_off=bn_cudnn_off)
            self.activ = nn.Activation("relu")
            if use_conv:
                self.conv2 = conv1x1(
                    in_channels=mid_channels,
                    out_channels=in_channels,
                    use_bias=True)
            else:
                self.fc2 = nn.Dense(
                    in_units=mid_channels,
                    units=in_channels)
    def hybrid_forward(self, F, x):
        # Split the channel axis into (radix, out_channels) groups.
        x = x.reshape((0, -4, self.radix, -1, -2))
        # Gap statistics are computed over the sum of all splits.
        w = x.sum(axis=1)
        w = F.contrib.AdaptiveAvgPooling2D(w, output_size=1)
        if not self.use_conv:
            w = F.Flatten(w)
        w = self.conv1(w) if self.use_conv else self.fc1(w)
        w = self.bn(w)
        w = self.activ(w)
        w = self.conv2(w) if self.use_conv else self.fc2(w)
        w = w.reshape((0, self.groups, self.radix, -1))
        w = w.swapaxes(1, 2)
        # Softmax over the radix axis yields per-split attention weights.
        w = F.softmax(w, axis=1)
        w = w.reshape((0, self.radix, -1, 1, 1))
        # Weighted sum of the splits collapses the radix axis back to out_channels.
        x = F.broadcast_mul(x, w)
        x = x.sum(axis=1)
        return x
class SAConvBlock(HybridBlock):
    """
    Split-Attention convolution block from 'ResNeSt: Split-Attention Networks,' https://arxiv.org/abs/2004.08955.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    activation : function or str or None, default nn.Activation('relu')
        Activation function or name of activation function.
    radix : int, default 2
        Number of splits within a cardinal group.
    reduction : int, default 4
        Squeeze reduction value.
    min_channels : int, default 32
        Minimal number of squeezed channels.
    use_conv : bool, default True
        Whether to convolutional layers instead of fully-connected ones.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 use_bn=True,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 activation=(lambda: nn.Activation("relu")),
                 radix=2,
                 reduction=4,
                 min_channels=32,
                 use_conv=True,
                 **kwargs):
        super(SAConvBlock, self).__init__(**kwargs)
        with self.name_scope():
            # The convolution emits `radix` feature maps per nominal output
            # channel (channels and groups are both multiplied by radix).
            self.conv = ConvBlock(
                in_channels=in_channels,
                out_channels=(out_channels * radix),
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=(groups * radix),
                use_bias=use_bias,
                use_bn=use_bn,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=activation)
            # Split-attention over the radix splits collapses back to `out_channels`.
            self.att = SABlock(
                out_channels=out_channels,
                groups=groups,
                radix=radix,
                reduction=reduction,
                min_channels=min_channels,
                use_conv=use_conv,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.att(x)
        return x
def saconv3x3_block(in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    **kwargs):
    """
    Build a Split-Attention convolution block with a fixed 3x3 kernel.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.

    Returns:
    -------
    SAConvBlock
        The configured split-attention convolution block.
    """
    # Delegate everything to SAConvBlock, pinning only the kernel size.
    return SAConvBlock(
        kernel_size=3,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        **kwargs)
class PixelShuffle(HybridBlock):
    """
    Pixel-shuffle operation from 'Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel
    Convolutional Neural Network,' https://arxiv.org/abs/1609.05158.

    Parameters:
    ----------
    channels : int
        Number of input channels. Must be divisible by scale_factor^2.
    scale_factor : int
        Multiplier for spatial size.
    in_size : tuple of 2 int
        Spatial size of the input heatmap tensor.
    fixed_size : bool
        Whether to expect fixed spatial size of input image.
    """
    def __init__(self,
                 channels,
                 scale_factor,
                 in_size,
                 fixed_size,
                 **kwargs):
        super(PixelShuffle, self).__init__(**kwargs)
        # Bug fix: the previous check `channels % scale_factor % scale_factor == 0`
        # parsed as `(channels % scale_factor) % scale_factor` and only verified
        # divisibility by `scale_factor`. Pixel shuffle consumes scale_factor^2
        # input channels per output channel, so require divisibility by the square.
        assert (channels % (scale_factor * scale_factor) == 0)
        self.channels = channels
        self.scale_factor = scale_factor
        self.in_size = in_size
        self.fixed_size = fixed_size

    def hybrid_forward(self, F, x):
        f1 = self.scale_factor
        f2 = self.scale_factor
        if not self.fixed_size:
            # Shape-agnostic path using MXNet reshape magic values
            # (0: keep dim, -4: split dim, -3: merge dims, -2: copy the rest).
            x = x.reshape((0, -4, -1, f1 * f2, 0, 0))
            x = x.reshape((0, 0, -4, f1, f2, 0, 0))
            x = x.transpose((0, 1, 4, 2, 5, 3))
            x = x.reshape((0, 0, -3, -3))
        else:
            # Fixed-size path with fully explicit shapes (hybridization friendly).
            new_channels = self.channels // f1 // f2
            h, w = self.in_size
            x = x.reshape((0, new_channels, f1 * f2, h, w))
            x = x.reshape((0, new_channels, f1, f2, h, w))
            x = x.transpose((0, 1, 4, 2, 5, 3))
            x = x.reshape((0, new_channels, h * f1, w * f2))
        return x
class DucBlock(HybridBlock):
    """
    Dense Upsampling Convolution (DUC) block from 'Understanding Convolution for Semantic Segmentation,'
    https://arxiv.org/abs/1702.08502.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    scale_factor : int
        Multiplier for spatial size.
    in_size : tuple of 2 int
        Spatial size of the input heatmap tensor.
    fixed_size : bool
        Whether to expect fixed spatial size of input image.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 scale_factor,
                 in_size,
                 fixed_size,
                 bn_use_global_stats,
                 bn_cudnn_off,
                 **kwargs):
        super(DucBlock, self).__init__(**kwargs)
        # The conv emits scale_factor^2 maps per output channel; pixel shuffle
        # then trades that channel multiplicity for spatial resolution.
        mid_channels = (scale_factor * scale_factor) * out_channels
        with self.name_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.pix_shuffle = PixelShuffle(
                channels=mid_channels,
                scale_factor=scale_factor,
                in_size=in_size,
                fixed_size=fixed_size)

    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.pix_shuffle(x)
        return x
def split(x,
          sizes,
          axis=1):
    """
    Splits an array along a particular axis into multiple sub-arrays.

    Parameters:
    ----------
    x : Symbol or NDArray
        Input tensor.
    sizes : tuple/list of int
        Sizes of chunks.
    axis : int, default 1
        Axis along which to split.

    Returns:
    -------
    Tuple of Symbol or NDArray
        Resulted tensor.
    """
    # Walk the requested chunk sizes, slicing consecutive ranges off `axis`.
    chunks = []
    offset = 0
    for size in sizes:
        chunks.append(x.slice_axis(axis=axis, begin=offset, end=offset + size))
        offset += size
    return tuple(chunks)
class IBN(HybridBlock):
    """
    Instance-Batch Normalization block from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.

    Parameters:
    ----------
    channels : int
        Number of channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    first_fraction : float, default 0.5
        The first fraction of channels for normalization.
    inst_first : bool, default True
        Whether instance normalization be on the first part of channels.
    """
    def __init__(self,
                 channels,
                 bn_use_global_stats=False,
                 first_fraction=0.5,
                 inst_first=True,
                 **kwargs):
        super(IBN, self).__init__(**kwargs)
        self.inst_first = inst_first
        # Partition the channels: the first `first_fraction` share, and the rest.
        h1_channels = int(math.floor(channels * first_fraction))
        h2_channels = channels - h1_channels
        self.split_sections = [h1_channels, h2_channels]
        # One half is instance-normalized, the other batch-normalized; which is
        # which depends on `inst_first`.
        if self.inst_first:
            self.inst_norm = nn.InstanceNorm(
                in_channels=h1_channels,
                scale=True)
            self.batch_norm = nn.BatchNorm(
                in_channels=h2_channels,
                use_global_stats=bn_use_global_stats)
        else:
            self.batch_norm = nn.BatchNorm(
                in_channels=h1_channels,
                use_global_stats=bn_use_global_stats)
            self.inst_norm = nn.InstanceNorm(
                in_channels=h2_channels,
                scale=True)

    def hybrid_forward(self, F, x):
        # Split along the channel axis, normalize each part, then re-concatenate.
        x1, x2 = split(x, sizes=self.split_sections, axis=1)
        if self.inst_first:
            x1 = self.inst_norm(x1)
            x2 = self.batch_norm(x2)
        else:
            x1 = self.batch_norm(x1)
            x2 = self.inst_norm(x2)
        x = F.concat(x1, x2, dim=1)
        return x
class DualPathSequential(nn.HybridSequential):
    """
    A sequential container for hybrid blocks with dual inputs/outputs.
    Blocks will be executed in the order they are added.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first blocks with single input/output.
    last_ordinals : int, default 0
        Number of the final blocks with single input/output.
    dual_path_scheme : function
        Scheme of dual path response for a block.
    dual_path_scheme_ordinal : function
        Scheme of dual path response for an ordinal block.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda block, x1, x2: block(x1, x2)),
                 dual_path_scheme_ordinal=(lambda block, x1, x2: (block(x1), x2)),
                 **kwargs):
        super(DualPathSequential, self).__init__(**kwargs)
        self.return_two = return_two
        self.first_ordinals = first_ordinals
        self.last_ordinals = last_ordinals
        self.dual_path_scheme = dual_path_scheme
        self.dual_path_scheme_ordinal = dual_path_scheme_ordinal

    def hybrid_forward(self, F, x1, x2=None):
        length = len(self._children.values())
        for i, block in enumerate(self._children.values()):
            # The leading `first_ordinals` and trailing `last_ordinals` children
            # are treated as ordinary single-path blocks; x2 passes through.
            if (i < self.first_ordinals) or (i >= length - self.last_ordinals):
                x1, x2 = self.dual_path_scheme_ordinal(block, x1, x2)
            else:
                x1, x2 = self.dual_path_scheme(block, x1, x2)
        if self.return_two:
            return x1, x2
        else:
            return x1
class ParametricSequential(nn.HybridSequential):
    """
    A sequential container whose children accept extra positional/keyword
    arguments. Blocks are executed in the order they were added, and the same
    extra arguments are threaded through every child.
    """
    def __init__(self,
                 **kwargs):
        super(ParametricSequential, self).__init__(**kwargs)

    def hybrid_forward(self, F, x, *args, **kwargs):
        # Pipe `x` through each child, forwarding the shared extra arguments.
        for child in self._children.values():
            x = child(x, *args, **kwargs)
        return x
class Concurrent(nn.HybridSequential):
    """
    A container for concatenation of blocks on the base of the sequential container.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    merge_type : str, default None
        Type of branch merging. One of 'cat', 'stack' or 'sum'; when None it is
        derived from the `stack` flag.
    branches : list of HybridBlock, default None
        Branch blocks to add to the container at construction time.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 merge_type=None,
                 branches=None,
                 **kwargs):
        super(Concurrent, self).__init__(**kwargs)
        assert (merge_type is None) or (merge_type in ["cat", "stack", "sum"])
        self.axis = axis
        self.stack = stack
        # `merge_type` takes priority; otherwise fall back to the legacy
        # `stack` boolean flag.
        if merge_type is not None:
            self.merge_type = merge_type
        else:
            self.merge_type = "stack" if stack else "cat"
        if branches is not None:
            with self.name_scope():
                for branch in branches:
                    self.add(branch)

    def hybrid_forward(self, F, x):
        # Every child receives the same input; outputs are merged per merge_type.
        out = []
        for block in self._children.values():
            out.append(block(x))
        if self.merge_type == "stack":
            out = F.stack(*out, axis=self.axis)
        elif self.merge_type == "cat":
            out = F.concat(*out, dim=self.axis)
        elif self.merge_type == "sum":
            out = F.stack(*out, axis=self.axis).sum(axis=self.axis)
        else:
            raise NotImplementedError()
        return out
class SequentialConcurrent(nn.HybridSequential):
    """
    A sequential container that collects the intermediate output of every child
    and merges them (concatenation or stacking) into a single tensor.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    stack : bool, default False
        Whether to concatenate tensors along a new dimension.
    cat_input : bool, default True
        Whether to concatenate input tensor.
    """
    def __init__(self,
                 axis=1,
                 stack=False,
                 cat_input=True,
                 **kwargs):
        super(SequentialConcurrent, self).__init__(**kwargs)
        self.axis = axis
        self.stack = stack
        self.cat_input = cat_input

    def hybrid_forward(self, F, x):
        # Optionally seed the collection with the raw input tensor.
        collected = [x] if self.cat_input else []
        for child in self._children.values():
            x = child(x)
            collected.append(x)
        if self.stack:
            return F.stack(*collected, axis=self.axis)
        return F.concat(*collected, dim=self.axis)
class ParametricConcurrent(nn.HybridSequential):
    """
    A container that feeds the same input (plus extra arguments) to every child
    block and concatenates their outputs.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    """
    def __init__(self,
                 axis=1,
                 **kwargs):
        super(ParametricConcurrent, self).__init__(**kwargs)
        self.axis = axis

    def hybrid_forward(self, F, x, *args, **kwargs):
        # Each branch sees the identical input and extra arguments.
        branch_outs = [child(x, *args, **kwargs) for child in self._children.values()]
        return F.concat(*branch_outs, dim=self.axis)
class Hourglass(HybridBlock):
    """
    A hourglass block.

    Parameters:
    ----------
    down_seq : nn.HybridSequential
        Down modules as sequential.
    up_seq : nn.HybridSequential
        Up modules as sequential.
    skip_seq : nn.HybridSequential
        Skip connection modules as sequential.
    merge_type : str, default 'add'
        Type of concatenation of up and skip outputs.
    return_first_skip : bool, default False
        Whether return the first skip connection output. Used in ResAttNet.
    """
    def __init__(self,
                 down_seq,
                 up_seq,
                 skip_seq,
                 merge_type="add",
                 return_first_skip=False,
                 **kwargs):
        super(Hourglass, self).__init__(**kwargs)
        self.depth = len(down_seq)
        assert (merge_type in ["cat", "add"])
        assert (len(up_seq) == self.depth)
        # skip_seq may optionally contain one extra module applied at the
        # bottleneck (see `extra_skip` below).
        assert (len(skip_seq) in (self.depth, self.depth + 1))
        self.merge_type = merge_type
        self.return_first_skip = return_first_skip
        self.extra_skip = (len(skip_seq) == self.depth + 1)
        with self.name_scope():
            self.down_seq = down_seq
            self.up_seq = up_seq
            self.skip_seq = skip_seq

    def _merge(self, F, x, y):
        # Combine the upsampling path with a skip branch; `y` may be None for
        # the bottleneck iteration.
        if y is not None:
            if self.merge_type == "cat":
                x = F.concat(x, y, dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x

    def hybrid_forward(self, F, x):
        y = None
        # Downsampling pass: record the input and every intermediate output.
        down_outs = [x]
        for down_module in self.down_seq._children.values():
            x = down_module(x)
            down_outs.append(x)
        # Upsampling pass: walk the recorded outputs from deepest to shallowest,
        # merging each with its skip-transformed counterpart.
        for i in range(len(down_outs)):
            if i != 0:
                y = down_outs[self.depth - i]
                skip_module = self.skip_seq[self.depth - i]
                y = skip_module(y)
                x = self._merge(F, x, y)
            if i != len(down_outs) - 1:
                if (i == 0) and self.extra_skip:
                    # Optional extra skip module applied once at the bottleneck.
                    skip_module = self.skip_seq[self.depth]
                    x = skip_module(x)
                up_module = self.up_seq[self.depth - 1 - i]
                x = up_module(x)
        if self.return_first_skip:
            # `y` holds the last (shallowest) skip output at this point.
            return x, y
        else:
            return x
class SesquialteralHourglass(HybridBlock):
    """
    A sesquialteral hourglass block.

    Parameters:
    ----------
    down1_seq : nn.Sequential
        The first down modules as sequential.
    skip1_seq : nn.Sequential
        The first skip connection modules as sequential.
    up_seq : nn.Sequential
        Up modules as sequential.
    skip2_seq : nn.Sequential
        The second skip connection modules as sequential.
    down2_seq : nn.Sequential
        The second down modules as sequential.
    merge_type : str, default 'cat'
        Type of concatenation of up and skip outputs.
    """
    def __init__(self,
                 down1_seq,
                 skip1_seq,
                 up_seq,
                 skip2_seq,
                 down2_seq,
                 merge_type="cat",
                 **kwargs):
        super(SesquialteralHourglass, self).__init__(**kwargs)
        # The three main passes have equal depth; the skip sequences carry one
        # extra module (applied before the first / after the last level).
        assert (len(down1_seq) == len(up_seq))
        assert (len(down1_seq) == len(down2_seq))
        assert (len(skip1_seq) == len(skip2_seq))
        assert (len(down1_seq) == len(skip1_seq) - 1)
        assert (merge_type in ["cat", "add"])
        self.merge_type = merge_type
        self.depth = len(down1_seq)
        with self.name_scope():
            self.down1_seq = down1_seq
            self.skip1_seq = skip1_seq
            self.up_seq = up_seq
            self.skip2_seq = skip2_seq
            self.down2_seq = down2_seq

    def _merge(self, F, x, y):
        # Combine main path `x` with skip branch `y` (if present).
        if y is not None:
            if self.merge_type == "cat":
                x = F.concat(x, y, dim=1)
            elif self.merge_type == "add":
                x = x + y
        return x

    def hybrid_forward(self, F, x):
        # First down pass: collect skip1 outputs at every level.
        y = self.skip1_seq[0](x)
        skip1_outs = [y]
        for i in range(self.depth):
            x = self.down1_seq[i](x)
            y = self.skip1_seq[i + 1](x)
            skip1_outs.append(y)
        x = skip1_outs[self.depth]
        # Up pass: merge with skip1 outputs (deepest first), collect skip2 outputs.
        y = self.skip2_seq[0](x)
        skip2_outs = [y]
        for i in range(self.depth):
            x = self.up_seq[i](x)
            y = skip1_outs[self.depth - 1 - i]
            x = self._merge(F, x, y)
            y = self.skip2_seq[i + 1](x)
            skip2_outs.append(y)
        # Second down pass: merge with skip2 outputs.
        x = self.skip2_seq[self.depth](x)
        for i in range(self.depth):
            x = self.down2_seq[i](x)
            y = skip2_outs[self.depth - 1 - i]
            x = self._merge(F, x, y)
        return x
class MultiOutputSequential(nn.HybridSequential):
    """
    A sequential container with multiple outputs.
    Blocks will be executed in the order they are added. Children may mark
    themselves as taps by setting a truthy `do_output` (their output is
    collected) or `do_output2` (they return `(main, [extras...])`; the extras
    are collected).

    Parameters:
    ----------
    multi_output : bool, default True
        Whether to return multiple output.
    dual_output : bool, default False
        Whether to return dual output.
    return_last : bool, default True
        Whether to forcibly return last value.
    """
    def __init__(self,
                 multi_output=True,
                 dual_output=False,
                 return_last=True,
                 **kwargs):
        super(MultiOutputSequential, self).__init__(**kwargs)
        self.multi_output = multi_output
        self.dual_output = dual_output
        self.return_last = return_last

    def hybrid_forward(self, F, x):
        outs = []
        for block in self._children.values():
            x = block(x)
            # `getattr` with default collapses the hasattr+attribute pair
            # (same truthiness semantics as the original check).
            if getattr(block, "do_output", False):
                outs.append(x)
            elif getattr(block, "do_output2", False):
                # Idiom fix: isinstance instead of exact-type comparison.
                assert isinstance(x, tuple)
                outs.extend(x[1])
                x = x[0]
        if self.multi_output:
            return [x] + outs if self.return_last else outs
        elif self.dual_output:
            return x, outs
        else:
            return x
# NOTE(review): class name misspells "Concurrent"; kept as-is since it is a
# public name imported elsewhere in the package.
class ParallelConcurent(nn.HybridSequential):
    """
    A sequential container with multiple inputs and multiple outputs.
    Modules will be executed in the order they are added. The i-th child
    processes the i-th element of the input sequence.

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    merge_type : str, default 'list'
        Type of branch merging.
    """
    def __init__(self,
                 axis=1,
                 merge_type="list",
                 **kwargs):
        super(ParallelConcurent, self).__init__(**kwargs)
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = axis
        self.merge_type = merge_type

    def hybrid_forward(self, F, x):
        # Pair each child with its own input tensor.
        out = []
        for block, xi in zip(self._children.values(), x):
            out.append(block(xi))
        if self.merge_type == "list":
            pass
        elif self.merge_type == "stack":
            out = F.stack(*out, axis=self.axis)
        elif self.merge_type == "cat":
            out = F.concat(*out, dim=self.axis)
        elif self.merge_type == "sum":
            out = F.stack(*out, axis=self.axis).sum(axis=self.axis)
        else:
            raise NotImplementedError()
        return out
class DualPathParallelConcurent(nn.HybridSequential):
    """
    A sequential container with multiple dual-path inputs and single/multiple outputs.
    Blocks will be executed in the order they are added. The i-th child
    processes the i-th pair of inputs (x1[i], x2[i]).

    Parameters:
    ----------
    axis : int, default 1
        The axis on which to concatenate the outputs.
    merge_type : str, default 'list'
        Type of branch merging.
    """
    def __init__(self,
                 axis=1,
                 merge_type="list",
                 **kwargs):
        super(DualPathParallelConcurent, self).__init__(**kwargs)
        assert (merge_type is None) or (merge_type in ["list", "cat", "stack", "sum"])
        self.axis = axis
        self.merge_type = merge_type

    def hybrid_forward(self, F, x1, x2):
        # Run each child on its own (x1, x2) pair, collecting both outputs.
        x1_out = []
        x2_out = []
        for block, x1i, x2i in zip(self._children.values(), x1, x2):
            y1i, y2i = block(x1i, x2i)
            x1_out.append(y1i)
            x2_out.append(y2i)
        # Merge both paths identically per `merge_type`.
        if self.merge_type == "list":
            pass
        elif self.merge_type == "stack":
            x1_out = F.stack(*x1_out, axis=self.axis)
            x2_out = F.stack(*x2_out, axis=self.axis)
        elif self.merge_type == "cat":
            x1_out = F.concat(*x1_out, dim=self.axis)
            x2_out = F.concat(*x2_out, dim=self.axis)
        elif self.merge_type == "sum":
            x1_out = F.stack(*x1_out, axis=self.axis).sum(axis=self.axis)
            x2_out = F.stack(*x2_out, axis=self.axis).sum(axis=self.axis)
        else:
            raise NotImplementedError()
        return x1_out, x2_out
class HeatmapMaxDetBlock(HybridBlock):
    """
    Heatmap maximum detector block (for human pose estimation task).

    Parameters:
    ----------
    channels : int
        Number of channels.
    in_size : tuple of 2 int
        Spatial size of the input heatmap tensor.
    fixed_size : bool
        Whether to expect fixed spatial size of input image.
    tune : bool, default True
        Whether to tune point positions.
    """
    def __init__(self,
                 channels,
                 in_size,
                 fixed_size,
                 tune=True,
                 **kwargs):
        super(HeatmapMaxDetBlock, self).__init__(**kwargs)
        self.channels = channels
        self.in_size = in_size
        self.fixed_size = fixed_size
        self.tune = tune

    def hybrid_forward(self, F, x):
        """
        Find, per channel, the (x, y, score) of the heatmap maximum.
        NOTE(review): uses `x.shape` and (when tuning) `.asscalar()`, so this
        block appears to require NDArray inputs (imperative mode) — verify
        before hybridizing.
        """
        # assert (not self.fixed_size) or (self.in_size == x.shape[2:])
        vector_dim = 2
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Flatten each (H, W) heatmap into a vector to argmax over it.
        heatmap_vector = x.reshape((0, 0, -3))
        indices = heatmap_vector.argmax(axis=vector_dim, keepdims=True)
        scores = heatmap_vector.max(axis=vector_dim, keepdims=True)
        # Zero out coordinates for non-positive detections.
        scores_mask = (scores > 0.0)
        pts_x = (indices % in_size[1]) * scores_mask
        pts_y = (indices / in_size[1]).floor() * scores_mask
        pts = F.concat(pts_x, pts_y, scores, dim=vector_dim)
        if self.tune:
            # Quarter-pixel refinement: nudge each point toward the larger
            # neighboring heatmap value along each axis.
            batch = x.shape[0]
            for b in range(batch):
                for k in range(self.channels):
                    hm = x[b, k, :, :]
                    px = int(pts[b, k, 0].asscalar())
                    py = int(pts[b, k, 1].asscalar())
                    if (0 < px < in_size[1] - 1) and (0 < py < in_size[0] - 1):
                        pts[b, k, 0] += (hm[py, px + 1] - hm[py, px - 1]).sign() * 0.25
                        pts[b, k, 1] += (hm[py + 1, px] - hm[py - 1, px]).sign() * 0.25
        return pts

    def __repr__(self):
        s = "{name}(channels={channels}, in_size={in_size}, fixed_size={fixed_size})"
        return s.format(
            name=self.__class__.__name__,
            channels=self.channels,
            in_size=self.in_size,
            fixed_size=self.fixed_size)

    def calc_flops(self, x):
        """Rough FLOPs estimate for profiling; expects batch size 1."""
        assert (x.shape[0] == 1)
        num_flops = x.size + 26 * self.channels
        num_macs = 0
        return num_flops, num_macs
"""
Lightweight OpenPose 2D/3D for CMU Panoptic, implemented in Gluon.
Original paper: 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,'
https://arxiv.org/abs/1811.12004.
"""
__all__ = ['LwOpenPose', 'lwopenpose2d_mobilenet_cmupan_coco', 'lwopenpose3d_mobilenet_cmupan_coco',
'LwopDecoderFinalBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, dwsconv3x3_block
class LwopResBottleneck(HybridBlock):
    """
    Bottleneck block for residual path in the residual unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    squeeze_out : bool, default False
        Whether to squeeze the output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bias=True,
                 bn_use_global_stats=False,
                 bottleneck_factor=2,
                 squeeze_out=False,
                 **kwargs):
        super(LwopResBottleneck, self).__init__(**kwargs)
        # The bottleneck width is derived from the output channels when
        # `squeeze_out` is set, otherwise from the input channels.
        mid_channels = out_channels // bottleneck_factor if squeeze_out else in_channels // bottleneck_factor
        with self.name_scope():
            # 1x1 reduce -> 3x3 (strided) -> 1x1 expand (no activation on the last).
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=use_bias,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                use_bias=use_bias,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                activation=None,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class LwopResUnit(HybridBlock):
    """
    ResNet-like residual unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    squeeze_out : bool, default False
        Whether to squeeze the output channels.
    activate : bool, default False
        Whether to activate the sum.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 use_bias=True,
                 bn_use_global_stats=False,
                 bottleneck_factor=2,
                 squeeze_out=False,
                 activate=False,
                 **kwargs):
        super(LwopResUnit, self).__init__(**kwargs)
        self.activate = activate
        # The identity branch needs a projection whenever shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = LwopResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                bn_use_global_stats=bn_use_global_stats,
                bottleneck_factor=bottleneck_factor,
                squeeze_out=squeeze_out)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_bias=use_bias,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            if self.activate:
                self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        if self.activate:
            x = self.activ(x)
        return x
class LwopEncoderFinalBlock(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific encoder final block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopEncoderFinalBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.pre_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=True,
                use_bn=False,
                bn_use_global_stats=bn_use_global_stats)
            # Residual body: three depthwise-separable convs with ELU, no BN.
            self.body = nn.HybridSequential(prefix="")
            for i in range(3):
                self.body.add(dwsconv3x3_block(
                    in_channels=out_channels,
                    out_channels=out_channels,
                    dw_use_bn=False,
                    pw_use_bn=False,
                    bn_use_global_stats=bn_use_global_stats,
                    dw_activation=(lambda: nn.ELU()),
                    pw_activation=(lambda: nn.ELU())))
            self.post_conv = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                use_bias=True,
                use_bn=False,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.pre_conv(x)
        # Residual connection around the separable-conv body.
        x = x + self.body(x)
        x = self.post_conv(x)
        return x
class LwopRefinementBlock(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific refinement block for decoder units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopRefinementBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.pre_conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=True,
                use_bn=False,
                bn_use_global_stats=bn_use_global_stats)
            # Residual body: a plain 3x3 conv followed by a dilated 3x3 conv
            # (dilation 2 widens the receptive field at the same resolution).
            self.body = nn.HybridSequential(prefix="")
            self.body.add(conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats))
            self.body.add(conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                padding=2,
                dilation=2,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats))

    def hybrid_forward(self, F, x):
        x = self.pre_conv(x)
        x = x + self.body(x)
        return x
class LwopDecoderBend(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific decoder bend block: a 1x1 projection
    (bias, no BN) followed by a plain 1x1 convolution that produces the
    final prediction maps.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopDecoderBend, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=True,
                use_bn=False,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)

    def hybrid_forward(self, F, x):
        # Two-stage 1x1 projection chain.
        return self.conv2(self.conv1(x))
class LwopDecoderInitBlock(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific decoder init block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopDecoderInitBlock, self).__init__(**kwargs)
        # One heatmap per keypoint; two PAF (part affinity field) maps per keypoint.
        num_heatmap = keypoints
        num_paf = 2 * keypoints
        bend_mid_channels = 512
        with self.name_scope():
            self.body = nn.HybridSequential(prefix="")
            for i in range(3):
                self.body.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    use_bias=True,
                    use_bn=False,
                    bn_use_global_stats=bn_use_global_stats))
            self.heatmap_bend = LwopDecoderBend(
                in_channels=in_channels,
                mid_channels=bend_mid_channels,
                out_channels=num_heatmap,
                bn_use_global_stats=bn_use_global_stats)
            self.paf_bend = LwopDecoderBend(
                in_channels=in_channels,
                mid_channels=bend_mid_channels,
                out_channels=num_paf,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        y = self.body(x)
        heatmap = self.heatmap_bend(y)
        paf = self.paf_bend(y)
        # Concatenate input features with the predicted heatmaps/PAFs so the
        # next decoder stage can refine them.
        y = F.concat(x, heatmap, paf, dim=1)
        return y
class LwopDecoderUnit(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific decoder unit (refinement stage).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopDecoderUnit, self).__init__(**kwargs)
        num_heatmap = keypoints
        num_paf = 2 * keypoints
        # Input = backbone features + previous heatmaps + previous PAFs;
        # strip the prediction channels to recover the feature width.
        self.features_channels = in_channels - num_heatmap - num_paf
        with self.name_scope():
            self.body = nn.HybridSequential(prefix="")
            for i in range(5):
                self.body.add(LwopRefinementBlock(
                    in_channels=in_channels,
                    out_channels=self.features_channels,
                    bn_use_global_stats=bn_use_global_stats))
                # Only the first refinement block sees the extra prediction channels.
                in_channels = self.features_channels
            self.heatmap_bend = LwopDecoderBend(
                in_channels=self.features_channels,
                mid_channels=self.features_channels,
                out_channels=num_heatmap,
                bn_use_global_stats=bn_use_global_stats)
            self.paf_bend = LwopDecoderBend(
                in_channels=self.features_channels,
                mid_channels=self.features_channels,
                out_channels=num_paf,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Carry the raw features through; re-predict heatmaps and PAFs.
        features = F.slice_axis(x, axis=1, begin=0, end=self.features_channels)
        y = self.body(x)
        heatmap = self.heatmap_bend(y)
        paf = self.paf_bend(y)
        y = F.concat(features, heatmap, paf, dim=1)
        return y
class LwopDecoderFeaturesBend(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific decoder 3D features bend.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid_channels : int
        Number of middle channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopDecoderFeaturesBend, self).__init__(**kwargs)
        with self.name_scope():
            # Two refinement blocks followed by the prediction bend.
            self.body = nn.HybridSequential(prefix="")
            for i in range(2):
                self.body.add(LwopRefinementBlock(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = mid_channels
            self.features_bend = LwopDecoderBend(
                in_channels=mid_channels,
                mid_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.body(x)
        x = self.features_bend(x)
        return x
class LwopDecoderFinalBlock(HybridBlock):
    """
    Lightweight OpenPose 2D/3D specific decoder final block for calculation of 3D poses.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    keypoints : int
        Number of keypoints.
    bottleneck_factor : int
        Bottleneck factor.
    calc_3d_features : bool
        Whether to calculate 3D features.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 keypoints,
                 bottleneck_factor,
                 calc_3d_features,
                 bn_use_global_stats=False,
                 **kwargs):
        super(LwopDecoderFinalBlock, self).__init__(**kwargs)
        # 2D predictions occupy the trailing 3*keypoints channels
        # (1 heatmap + 2 PAF maps per keypoint).
        self.num_heatmap_paf = 3 * keypoints
        self.calc_3d_features = calc_3d_features
        features_out_channels = self.num_heatmap_paf
        features_in_channels = in_channels - features_out_channels
        # The 3D branch is only built when requested.
        if self.calc_3d_features:
            with self.name_scope():
                self.body = nn.HybridSequential(prefix="")
                for i in range(5):
                    self.body.add(LwopResUnit(
                        in_channels=in_channels,
                        out_channels=features_in_channels,
                        bottleneck_factor=bottleneck_factor,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = features_in_channels
                self.features_bend = LwopDecoderFeaturesBend(
                    in_channels=features_in_channels,
                    mid_channels=features_in_channels,
                    out_channels=features_out_channels,
                    bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Keep the trailing 2D heatmap/PAF channels as-is.
        heatmap_paf_2d = F.slice_axis(x, axis=1, begin=-self.num_heatmap_paf, end=None)
        if not self.calc_3d_features:
            return heatmap_paf_2d
        # 3D branch: refine features, then prepend 2D maps to the 3D output.
        x = self.body(x)
        x = self.features_bend(x)
        y = F.concat(heatmap_paf_2d, x, dim=1)
        return y
class LwOpenPose(HybridBlock):
    """
    Lightweight OpenPose 2D/3D model from 'Real-time 2D Multi-Person Pose Estimation on CPU: Lightweight OpenPose,'
    https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    encoder_channels : list of list of int
        Number of output channels for each encoder unit.
    encoder_paddings : list of list of int
        Padding/dilation value for each encoder unit.
    encoder_init_block_channels : int
        Number of output channels for the encoder initial unit.
    encoder_final_block_channels : int
        Number of output channels for the encoder final unit.
    refinement_units : int
        Number of refinement blocks in the decoder.
    calc_3d_features : bool
        Whether to calculate 3D features.
    return_heatmap : bool, default True
        Whether to return only heatmap.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (368, 368)
        Spatial size of the expected input image.
    keypoints : int, default 19
        Number of keypoints.
    """
    def __init__(self,
                 encoder_channels,
                 encoder_paddings,
                 encoder_init_block_channels,
                 encoder_final_block_channels,
                 refinement_units,
                 calc_3d_features,
                 return_heatmap=True,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(368, 368),
                 keypoints=19,
                 **kwargs):
        super(LwOpenPose, self).__init__(**kwargs)
        assert (in_channels == 3)
        self.in_size = in_size
        self.keypoints = keypoints
        self.return_heatmap = return_heatmap
        self.calc_3d_features = calc_3d_features
        num_heatmap_paf = 3 * keypoints

        with self.name_scope():
            # Encoder: MobileNet-style backbone plus a final feature block.
            self.encoder = nn.HybridSequential(prefix="")
            backbone = nn.HybridSequential(prefix="")
            backbone.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=encoder_init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = encoder_init_block_channels
            for i, channels_per_stage in enumerate(encoder_channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the start of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # Padding doubles as dilation, preserving spatial resolution.
                        padding = encoder_paddings[i][j]
                        stage.add(dwsconv3x3_block(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            padding=padding,
                            dilation=padding,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                backbone.add(stage)
            self.encoder.add(backbone)
            self.encoder.add(LwopEncoderFinalBlock(
                in_channels=in_channels,
                out_channels=encoder_final_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = encoder_final_block_channels

            # Decoder: initial block, refinement units, then the final (2D/3D) block.
            self.decoder = nn.HybridSequential(prefix="")
            self.decoder.add(LwopDecoderInitBlock(
                in_channels=in_channels,
                keypoints=keypoints,
                bn_use_global_stats=bn_use_global_stats))
            # Refinement stages consume backbone features concatenated with heatmaps/PAFs.
            in_channels = encoder_final_block_channels + num_heatmap_paf
            for i in range(refinement_units):
                self.decoder.add(LwopDecoderUnit(
                    in_channels=in_channels,
                    keypoints=keypoints,
                    bn_use_global_stats=bn_use_global_stats))
            self.decoder.add(LwopDecoderFinalBlock(
                in_channels=in_channels,
                keypoints=keypoints,
                bottleneck_factor=2,
                calc_3d_features=calc_3d_features,
                bn_use_global_stats=bn_use_global_stats))

    def hybrid_forward(self, F, x):
        x = self.encoder(x)
        x = self.decoder(x)
        # NOTE(review): the original code branched on `self.return_heatmap` but both
        # branches returned the same tensor, so the flag currently has no effect on
        # the output; kept for interface compatibility.
        return x
def get_lwopenpose(calc_3d_features,
                   keypoints,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create Lightweight OpenPose 2D/3D model with specific parameters.

    Parameters:
    ----------
    calc_3d_features : bool, default False
        Whether to calculate 3D features.
    keypoints : int
        Number of keypoints.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed MobileNet-like backbone configuration.
    net = LwOpenPose(
        encoder_channels=[[64], [128, 128], [256, 256, 512, 512, 512, 512, 512, 512]],
        encoder_paddings=[[1], [1, 1], [1, 1, 1, 2, 1, 1, 1, 1]],
        encoder_init_block_channels=32,
        encoder_final_block_channels=128,
        refinement_units=1,
        calc_3d_features=calc_3d_features,
        keypoints=keypoints,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def lwopenpose2d_mobilenet_cmupan_coco(keypoints=19, **kwargs):
    """
    Lightweight OpenPose 2D model (MobileNet backbone) for CMU Panoptic from 'Real-time 2D Multi-Person Pose
    Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    keypoints : int, default 19
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(
        calc_3d_features=False,
        keypoints=keypoints,
        model_name="lwopenpose2d_mobilenet_cmupan_coco",
        **kwargs)
    return get_lwopenpose(**config)
def lwopenpose3d_mobilenet_cmupan_coco(keypoints=19, **kwargs):
    """
    Lightweight OpenPose 3D model (MobileNet backbone) for CMU Panoptic from 'Real-time 2D Multi-Person Pose
    Estimation on CPU: Lightweight OpenPose,' https://arxiv.org/abs/1811.12004.

    Parameters:
    ----------
    keypoints : int, default 19
        Number of keypoints.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(
        calc_3d_features=True,
        keypoints=keypoints,
        model_name="lwopenpose3d_mobilenet_cmupan_coco",
        **kwargs)
    return get_lwopenpose(**config)
def _test():
    """Smoke-test the LwOpenPose factories: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    in_size = (368, 368)
    keypoints = 19
    return_heatmap = True
    pretrained = False

    models = (
        (lwopenpose2d_mobilenet_cmupan_coco, "2d", 4091698),
        (lwopenpose3d_mobilenet_cmupan_coco, "3d", 5085983),
    )

    for model, model_dim, expected_count in models:
        net = model(pretrained=pretrained, in_size=in_size, return_heatmap=return_heatmap)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()

        # Count only trainable parameters with known shapes.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        batch = 14
        x = mx.nd.random.normal(shape=(batch, 3, in_size[0], in_size[1]), ctx=ctx)
        y = net(x)
        # 2D output: heatmaps+PAFs only; 3D output additionally carries 3D features.
        out_channels = (3 if model_dim == "2d" else 6) * keypoints
        assert (y.shape == (batch, out_channels, in_size[0] // 8, in_size[0] // 8))


if __name__ == "__main__":
    _test()
| 26,308 | 35.138736 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/rir_cifar.py | """
RiR for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029.
"""
__all__ = ['CIFARRiR', 'rir_cifar10', 'rir_cifar100', 'rir_svhn', 'RiRFinalBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3, conv1x1_block, conv3x3_block, DualPathSequential
class PostActivation(HybridBlock):
    """
    Batch normalization followed by ReLU, with no convolution layer.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(PostActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        return self.activ(self.bn(x))
class RiRUnit(HybridBlock):
    """
    RiR unit: a residual stream and a transient stream that exchange information
    through cross convolutions.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(RiRUnit, self).__init__(**kwargs)
        # A projection shortcut is needed when the shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            # "Pass" convolutions stay within a stream; "cross" convolutions mix streams.
            self.res_pass_conv = conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides)
            self.trans_pass_conv = conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides)
            self.res_cross_conv = conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides)
            self.trans_cross_conv = conv3x3(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides)
            self.res_postactiv = PostActivation(
                in_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.trans_postactiv = PostActivation(
                in_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides)

    def hybrid_forward(self, F, x_res, x_trans):
        identity = self.identity_conv(x_res) if self.resize_identity else x_res
        cross_res = self.res_cross_conv(x_res)
        cross_trans = self.trans_cross_conv(x_trans)
        pass_res = self.res_pass_conv(x_res)
        pass_trans = self.trans_pass_conv(x_trans)
        # Residual stream keeps its identity; transient stream has no shortcut.
        out_res = self.res_postactiv(pass_res + identity + cross_trans)
        out_trans = self.trans_postactiv(pass_trans + cross_res)
        return out_res, out_trans
class RiRInitBlock(HybridBlock):
    """
    RiR initial block: creates the residual and transient streams from the input image.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(RiRInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.res_conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.trans_conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x, _):
        # Both streams start from the same input; the second argument is ignored.
        return self.res_conv(x), self.trans_conv(x)
class RiRFinalBlock(HybridBlock):
    """
    RiR final block: concatenates the residual and transient streams along channels.
    """
    def __init__(self):
        super(RiRFinalBlock, self).__init__()

    def hybrid_forward(self, F, x_res, x_trans):
        # Second return value keeps the dual-path interface shape.
        return F.concat(x_res, x_trans, dim=1), None
class CIFARRiR(HybridBlock):
    """
    RiR model for CIFAR from 'Resnet in Resnet: Generalizing Residual Architectures,' https://arxiv.org/abs/1603.08029.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARRiR, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            # Dual-path trunk carrying (residual, transient) stream pairs.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=0,
                last_ordinals=0,
                prefix="")
            self.features.add(RiRInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for stage_idx, stage_channels in enumerate(channels):
                stage = DualPathSequential(prefix="stage{}_".format(stage_idx + 1))
                for unit_idx, out_channels in enumerate(stage_channels):
                    # Downsample at the start of every stage except the first.
                    strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                    stage.add(RiRUnit(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = out_channels
                self.features.add(stage)
            self.features.add(RiRFinalBlock())
            in_channels = final_block_channels

            # Classifier head: 1x1 conv to class logits, then global pooling.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=classes,
                bn_use_global_stats=bn_use_global_stats,
                activation=None))
            self.output.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output.add(nn.Flatten())

    def hybrid_forward(self, F, x):
        # The trunk takes the same tensor as both initial streams.
        return self.output(self.features(x, x))
def get_rir_cifar(classes,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Create RiR model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed RiR configuration: 4/6/6 units per stage.
    net = CIFARRiR(
        channels=[[48] * 4, [96] * 6, [192] * 6],
        init_block_channels=48,
        final_block_channels=384,
        classes=classes,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def rir_cifar10(classes=10, **kwargs):
    """
    RiR model for CIFAR-10 from 'Resnet in Resnet: Generalizing Residual Architectures,'
    https://arxiv.org/abs/1603.08029.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(classes=classes, model_name="rir_cifar10", **kwargs)
    return get_rir_cifar(**config)
def rir_cifar100(classes=100, **kwargs):
    """
    RiR model for CIFAR-100 from 'Resnet in Resnet: Generalizing Residual Architectures,'
    https://arxiv.org/abs/1603.08029.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(classes=classes, model_name="rir_cifar100", **kwargs)
    return get_rir_cifar(**config)
def rir_svhn(classes=10, **kwargs):
    """
    RiR model for SVHN from 'Resnet in Resnet: Generalizing Residual Architectures,'
    https://arxiv.org/abs/1603.08029.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(classes=classes, model_name="rir_svhn", **kwargs)
    return get_rir_cifar(**config)
def _test():
    """Smoke-test the RiR factories: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = (
        (rir_cifar10, 10, 9492980),
        (rir_cifar100, 100, 9527720),
        (rir_svhn, 10, 9492980),
    )

    for model, classes, expected_count in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with known shapes.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 12,481 | 31.761155 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/diapreresnet.py | """
DIA-PreResNet for ImageNet-1K, implemented in Gluon.
Original papers: 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.
"""
__all__ = ['DIAPreResNet', 'diapreresnet10', 'diapreresnet12', 'diapreresnet14', 'diapreresnetbc14b', 'diapreresnet16',
'diapreresnet18', 'diapreresnet26', 'diapreresnetbc26b', 'diapreresnet34', 'diapreresnetbc38b',
'diapreresnet50', 'diapreresnet50b', 'diapreresnet101', 'diapreresnet101b', 'diapreresnet152',
'diapreresnet152b', 'diapreresnet200', 'diapreresnet200b', 'diapreresnet269b', 'DIAPreResUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, DualPathSequential
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
from .diaresnet import DIAAttention
class DIAPreResUnit(HybridBlock):
    """
    DIA-PreResNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    attention : nn.Module, default None
        Attention module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 conv1_stride,
                 attention=None,
                 **kwargs):
        super(DIAPreResUnit, self).__init__(**kwargs)
        # A projection shortcut is needed when the shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            if bottleneck:
                self.body = PreResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=conv1_stride)
            else:
                self.body = PreResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides)
            self.attention = attention

    def hybrid_forward(self, F, x, hc=None):
        shortcut = x
        out, pre_activ = self.body(x)
        if self.resize_identity:
            # Projection shortcut is taken from the pre-activated input.
            shortcut = self.identity_conv(pre_activ)
        out, hc = self.attention(out, hc)
        return out + shortcut, hc
class DIAPreResNet(HybridBlock):
    """
    DIA-PreResNet model from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DIAPreResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PreResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for stage_idx, stage_channels in enumerate(channels):
                stage = DualPathSequential(
                    return_two=False,
                    prefix="stage{}_".format(stage_idx + 1))
                # One attention cell is created per stage and passed to each unit.
                attention = DIAAttention(
                    in_x_features=stage_channels[0],
                    in_h_features=stage_channels[0])
                with stage.name_scope():
                    for unit_idx, out_channels in enumerate(stage_channels):
                        # Downsample at the start of every stage except the first.
                        strides = 2 if (unit_idx == 0) and (stage_idx != 0) else 1
                        stage.add(DIAPreResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride,
                            attention=attention))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        return self.output(self.features(x))
def get_diapreresnet(blocks,
                     bottleneck=None,
                     conv1_stride=True,
                     width_scale=1.0,
                     model_name=None,
                     pretrained=False,
                     ctx=cpu(),
                     root=os.path.join("~", ".mxnet", "models"),
                     **kwargs):
    """
    Create DIA-PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # By convention, deep variants (>= 50 blocks) use bottleneck units.
        bottleneck = (blocks >= 50)

    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    elif blocks == 269:
        layers = [3, 30, 48, 8]
    else:
        raise ValueError("Unsupported DIA-PreResNet with number of blocks: {}".format(blocks))

    # Sanity check: 3 convs per bottleneck unit, 2 per simple unit, plus init/final.
    assert (sum(layers) * (3 if bottleneck else 2) + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [ci * 4 for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit's width except the very last unit of the last stage.
        channels = [[int(cij * width_scale) if (i != len(channels) - 1) or (j != len(ci) - 1) else cij
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)

    net = DIAPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def diapreresnet10(**kwargs):
    """
    DIA-PreResNet-10 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=10, model_name="diapreresnet10", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet12(**kwargs):
    """
    DIA-PreResNet-12 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=12, model_name="diapreresnet12", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet14(**kwargs):
    """
    DIA-PreResNet-14 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=14, model_name="diapreresnet14", **kwargs)
    return get_diapreresnet(**config)
def diapreresnetbc14b(**kwargs):
    """
    DIA-PreResNet-BC-14b model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=14, bottleneck=True, conv1_stride=False,
                  model_name="diapreresnetbc14b", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet16(**kwargs):
    """
    DIA-PreResNet-16 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=16, model_name="diapreresnet16", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet18(**kwargs):
    """
    DIA-PreResNet-18 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=18, model_name="diapreresnet18", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet26(**kwargs):
    """
    DIA-PreResNet-26 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=26, bottleneck=False, model_name="diapreresnet26", **kwargs)
    return get_diapreresnet(**config)
def diapreresnetbc26b(**kwargs):
    """
    DIA-PreResNet-BC-26b model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=26, bottleneck=True, conv1_stride=False,
                  model_name="diapreresnetbc26b", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet34(**kwargs):
    """
    DIA-PreResNet-34 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=34, model_name="diapreresnet34", **kwargs)
    return get_diapreresnet(**config)
def diapreresnetbc38b(**kwargs):
    """
    DIA-PreResNet-BC-38b model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671. It's an experimental model (bottleneck compressed).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=38, bottleneck=True, conv1_stride=False,
                  model_name="diapreresnetbc38b", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet50(**kwargs):
    """
    DIA-PreResNet-50 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=50, model_name="diapreresnet50", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet50b(**kwargs):
    """
    DIA-PreResNet-50 model with stride at the second convolution in bottleneck block from
    'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=50, conv1_stride=False, model_name="diapreresnet50b", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet101(**kwargs):
    """
    DIA-PreResNet-101 model from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    config = dict(blocks=101, model_name="diapreresnet101", **kwargs)
    return get_diapreresnet(**config)
def diapreresnet101b(**kwargs):
    """
    Construct the DIA-PreResNet-101 network variant that keeps the stride on the second convolution of each
    bottleneck block, from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "diapreresnet101b"
    return get_diapreresnet(blocks=101, conv1_stride=False, model_name=net_name, **kwargs)
def diapreresnet152(**kwargs):
    """
    Construct the DIA-PreResNet-152 network from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "diapreresnet152"
    return get_diapreresnet(blocks=152, model_name=net_name, **kwargs)
def diapreresnet152b(**kwargs):
    """
    Construct the DIA-PreResNet-152 network variant that keeps the stride on the second convolution of each
    bottleneck block, from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "diapreresnet152b"
    return get_diapreresnet(blocks=152, conv1_stride=False, model_name=net_name, **kwargs)
def diapreresnet200(**kwargs):
    """
    Construct the DIA-PreResNet-200 network from 'DIANet: Dense-and-Implicit Attention Network,'
    https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "diapreresnet200"
    return get_diapreresnet(blocks=200, model_name=net_name, **kwargs)
def diapreresnet200b(**kwargs):
    """
    Construct the DIA-PreResNet-200 network variant that keeps the stride on the second convolution of each
    bottleneck block, from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "diapreresnet200b"
    return get_diapreresnet(blocks=200, conv1_stride=False, model_name=net_name, **kwargs)
def diapreresnet269b(**kwargs):
    """
    Construct the DIA-PreResNet-269 network variant that keeps the stride on the second convolution of each
    bottleneck block, from 'DIANet: Dense-and-Implicit Attention Network,' https://arxiv.org/abs/1905.10671.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "diapreresnet269b"
    return get_diapreresnet(blocks=269, conv1_stride=False, model_name=net_name, **kwargs)
def _test():
    """Smoke-test every DIA-PreResNet constructor: build, count weights, run one forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, expected number of trainable weights)
    model_specs = [
        (diapreresnet10, 6295688),
        (diapreresnet12, 6369672),
        (diapreresnet14, 6665096),
        (diapreresnetbc14b, 24016424),
        (diapreresnet16, 7845768),
        (diapreresnet18, 12566408),
        (diapreresnet26, 18837128),
        (diapreresnetbc26b, 29946664),
        (diapreresnet34, 22674568),
        (diapreresnetbc38b, 35876904),
        (diapreresnet50, 39508520),
        (diapreresnet50b, 39508520),
        (diapreresnet101, 58500648),
        (diapreresnet101b, 58500648),
        (diapreresnet152, 74144296),
        (diapreresnet152b, 74144296),
        (diapreresnet200, 78625320),
        (diapreresnet200b, 78625320),
        (diapreresnet269b, 116024872),
    ]

    for model, expected_count in model_specs:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()

        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        # One forward pass on a dummy ImageNet-sized batch.
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the model smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 23,989 | 35.293495 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/jasperdr.py | """
Jasper DR (Dense Residual) for ASR, implemented in Gluon.
Original paper: 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.
"""
__all__ = ['jasperdr10x5_en', 'jasperdr10x5_en_nr']
from .jasper import get_jasper
def jasperdr10x5_en(classes=29, **kwargs):
    """
    Build the Jasper DR (dense residual) 10x5 acoustic model for English, from 'Jasper: An End-to-End
    Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "jasperdr10x5_en"
    return get_jasper(classes=classes, version=("jasper", "10x5"), use_dr=True, model_name=net_name, **kwargs)
def jasperdr10x5_en_nr(classes=29, **kwargs):
    """
    Build the Jasper DR (dense residual) 10x5 acoustic model for English trained with noise augmentation,
    from 'Jasper: An End-to-End Convolutional Neural Acoustic Model,' https://arxiv.org/abs/1904.03288.

    Parameters:
    ----------
    classes : int, default 29
        Number of classification classes (number of graphemes).
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net_name = "jasperdr10x5_en_nr"
    return get_jasper(classes=classes, version=("jasper", "10x5"), use_dr=True, model_name=net_name, **kwargs)
def _calc_width(net):
    """Return the total number of trainable weights in `net` (differentiable params with a known shape)."""
    import numpy as np
    return sum(
        np.prod(param.shape) for param in net.collect_params().values()
        if (param.shape is not None) and param._differentiable)
def _test():
    """Smoke-test the Jasper DR constructors: build, count weights, run one forward pass."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    audio_features = 64

    # Both English variants share the same architecture and weight count.
    for model in (jasperdr10x5_en, jasperdr10x5_en_nr):
        net = model(
            in_channels=audio_features,
            pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == 332632349)

        # Random-length batch: the model downsamples time by 2 (with possible rounding).
        batch = 3
        seq_len = np.random.randint(60, 150, batch)
        seq_len_max = seq_len.max() + 2
        x = mx.nd.random.normal(shape=(batch, audio_features, seq_len_max), ctx=ctx)
        x_len = mx.nd.array(seq_len, ctx=ctx, dtype=np.long)

        y, y_len = net(x, x_len)
        assert (y.shape[:2] == (batch, net.classes))
        assert (y.shape[2] in [seq_len_max // 2, seq_len_max // 2 + 1])
# Run the model smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 3,244 | 30.504854 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/deeplabv3.py | """
DeepLabv3 for image segmentation, implemented in Gluon.
Original paper: 'Rethinking Atrous Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.
"""
__all__ = ['DeepLabv3', 'deeplabv3_resnetd50b_voc', 'deeplabv3_resnetd101b_voc', 'deeplabv3_resnetd152b_voc',
'deeplabv3_resnetd50b_coco', 'deeplabv3_resnetd101b_coco', 'deeplabv3_resnetd152b_coco',
'deeplabv3_resnetd50b_ade20k', 'deeplabv3_resnetd101b_ade20k', 'deeplabv3_resnetd50b_cityscapes',
'deeplabv3_resnetd101b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnetd import resnetd50b, resnetd101b, resnetd152b
class DeepLabv3FinalBlock(HybridBlock):
    """
    DeepLabv3 final (classification head) block: 3x3 conv -> dropout -> 1x1 conv -> bilinear upsampling
    to the requested output size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (segmentation classes).
    bottleneck_factor : int, default 4
        Bottleneck factor; `in_channels` must be divisible by it.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(DeepLabv3FinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
            # Plain 1x1 conv with bias: produces raw per-class logits (no BN/activation).
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
    def hybrid_forward(self, F, x, out_size):
        # out_size is (height, width) of the desired output, e.g. the original image size.
        x = self.conv1(x)
        x = self.dropout(x)
        x = self.conv2(x)
        # Upsample logits back to out_size with bilinear interpolation.
        x = F.contrib.BilinearResize2D(x, height=out_size[0], width=out_size[1])
        return x
class ASPPAvgBranch(HybridBlock):
    """
    ASPP branch with global average pooling: pool to 1x1, project with a 1x1 conv, then bilinearly
    upsample back to the branch's input resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    upscale_out_size : tuple of 2 int or None
        Spatial size of output image for the bilinear upsampling operation.
        If None, the input's own spatial size is used (requires imperative mode,
        since `x.shape` is read at run time).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 upscale_out_size,
                 **kwargs):
        super(ASPPAvgBranch, self).__init__(**kwargs)
        self.upscale_out_size = upscale_out_size
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels)
    def hybrid_forward(self, F, x):
        # NOTE(review): the `x.shape` fallback breaks under hybridization; it is only
        # taken when upscale_out_size is None (i.e. fixed_size=False upstream).
        in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:]
        # Squeeze spatial dims to 1x1 (global context), project, then restore resolution.
        x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        x = self.conv(x)
        x = F.contrib.BilinearResize2D(x, height=in_size[0], width=in_size[1])
        return x
class AtrousSpatialPyramidPooling(HybridBlock):
    """
    Atrous Spatial Pyramid Pooling (ASPP) module: five parallel branches (one 1x1 conv, three dilated
    3x3 convs, one global-average branch) concatenated and projected by a 1x1 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels; must be divisible by 8.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation
        (forwarded to the average-pooling branch).
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 **kwargs):
        super(AtrousSpatialPyramidPooling, self).__init__(**kwargs)
        # Dilation rates per the DeepLabv3 paper (output stride 8 setting).
        atrous_rates = [12, 24, 36]
        assert (in_channels % 8 == 0)
        mid_channels = in_channels // 8
        # 5 branches, each producing mid_channels, concatenated along the channel axis.
        project_in_channels = 5 * mid_channels
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels))
            for atrous_rate in atrous_rates:
                # padding == dilation keeps the spatial size unchanged for 3x3 convs.
                self.branches.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    padding=atrous_rate,
                    dilation=atrous_rate))
            self.branches.add(ASPPAvgBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                upscale_out_size=upscale_out_size))
            self.conv = conv1x1_block(
                in_channels=project_in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.5)
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        x = self.conv(x)
        x = self.dropout(x)
        return x
class DeepLabv3(HybridBlock):
    """
    DeepLabv3 model from 'Rethinking Atrous Convolution for Semantic Image Segmentation,'
    https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor. Must return two tensors (main features, auxiliary features);
        see the `x, y = self.backbone(x)` unpacking below.
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image. When False, `x.shape` is read
        at run time, which precludes hybridization.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image; both dimensions must be divisible by 8.
    classes : int, default 1000
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 **kwargs):
        super(DeepLabv3, self).__init__(**kwargs)
        assert (in_channels > 0)
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.name_scope():
            self.backbone = backbone
            # Backbone has output stride 8, hence the // 8 pooling size.
            pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
            self.pool = AtrousSpatialPyramidPooling(
                in_channels=backbone_out_channels,
                upscale_out_size=pool_out_size)
            # ASPP compresses channels by 8x; its output feeds the head directly
            # (bottleneck_factor=1 -> no further compression in the head).
            pool_out_channels = backbone_out_channels // 8
            self.final_block = DeepLabv3FinalBlock(
                in_channels=pool_out_channels,
                out_channels=classes,
                bottleneck_factor=1)
            if self.aux:
                # Auxiliary head reads the backbone's intermediate (bend) features.
                aux_out_channels = backbone_out_channels // 2
                self.aux_block = DeepLabv3FinalBlock(
                    in_channels=aux_out_channels,
                    out_channels=classes,
                    bottleneck_factor=4)
    def hybrid_forward(self, F, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Backbone yields (final features, auxiliary features).
        x, y = self.backbone(x)
        x = self.pool(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_deeplabv3(backbone,
                  classes,
                  aux=False,
                  model_name=None,
                  pretrained=False,
                  ctx=cpu(),
                  root=os.path.join("~", ".mxnet", "models"),
                  **kwargs):
    """
    Instantiate a DeepLabv3 model and optionally load its pretrained weights.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = DeepLabv3(
        backbone=backbone,
        classes=classes,
        aux=aux,
        **kwargs)

    if pretrained:
        # A usable (non-empty) model name is required to locate the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        # ignore_extra: the stored checkpoint may contain auxiliary-head weights
        # that this instance does not define.
        net.load_parameters(
            filename=weight_file,
            ctx=ctx,
            ignore_extra=True)

    return net
def deeplabv3_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-50b backbone for Pascal VOC, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_voc", **kwargs)
def deeplabv3_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-101b backbone for Pascal VOC, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_voc", **kwargs)
def deeplabv3_resnetd152b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-152b backbone for Pascal VOC, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd152b_voc", **kwargs)
def deeplabv3_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-50b backbone for COCO, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_coco", **kwargs)
def deeplabv3_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-101b backbone for COCO, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_coco", **kwargs)
def deeplabv3_resnetd152b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-152b backbone for COCO, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd152b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd152b_coco", **kwargs)
def deeplabv3_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-50b backbone for ADE20K, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_ade20k", **kwargs)
def deeplabv3_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-101b backbone for ADE20K, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_ade20k", **kwargs)
def deeplabv3_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-50b backbone for Cityscapes, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd50b_cityscapes", **kwargs)
def deeplabv3_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    DeepLabv3 segmentation model with a ResNet(D)-101b backbone for Cityscapes, from 'Rethinking Atrous
    Convolution for Semantic Image Segmentation,' https://arxiv.org/abs/1706.05587.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the backbone's final (pooling) stage; bends=(3,) exposes the stage-3 features
    # that feed the auxiliary head.
    feature_extractor = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features[:-1]
    return get_deeplabv3(backbone=feature_extractor, classes=classes, aux=aux,
                         model_name="deeplabv3_resnetd101b_cityscapes", **kwargs)
def _test():
    """Smoke-test every DeepLabv3 constructor: build, count weights, run one forward pass."""
    import numpy as np
    import mxnet as mx

    in_size = (480, 480)
    aux = False
    pretrained = False

    # (constructor, classes, weight count with aux head, weight count without it)
    model_specs = [
        (deeplabv3_resnetd50b_voc, 21, 42127850, 39762645),
        (deeplabv3_resnetd101b_voc, 21, 61119978, 58754773),
        (deeplabv3_resnetd152b_voc, 21, 76763626, 74398421),
        (deeplabv3_resnetd50b_coco, 21, 42127850, 39762645),
        (deeplabv3_resnetd101b_coco, 21, 61119978, 58754773),
        (deeplabv3_resnetd152b_coco, 21, 76763626, 74398421),
        (deeplabv3_resnetd50b_ade20k, 150, 42194156, 39795798),
        (deeplabv3_resnetd101b_ade20k, 150, 61186284, 58787926),
        (deeplabv3_resnetd50b_cityscapes, 19, 42126822, 39762131),
        (deeplabv3_resnetd101b_cityscapes, 19, 61118950, 58754259),
    ]

    for model, classes, aux_count, plain_count in model_specs:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()

        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == (aux_count if aux else plain_count))

        # One forward pass; with aux enabled the net returns (main, aux) predictions.
        x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
# Run the model smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 22,813 | 38.884615 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fpenet.py | """
FPENet for image segmentation, implemented in Gluon.
Original paper: 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,'
https://arxiv.org/abs/1909.08599.
"""
__all__ = ['FPENet', 'fpenet_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, SEBlock, InterpolationBlock, MultiOutputSequential
class FPEBlock(HybridBlock):
    """
    FPENet block: splits the input channel-wise into four groups processed by depthwise 3x3 convolutions
    with increasing dilation, fusing each group with the previous group's output before convolving.

    Parameters:
    ----------
    channels : int
        Number of input/output channels; must be divisible by 4 (the number of dilation levels).
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(FPEBlock, self).__init__(**kwargs)
        # One branch per dilation level; each branch sees channels/4 channels.
        dilations = [1, 2, 4, 8]
        assert (channels % len(dilations) == 0)
        mid_channels = channels // len(dilations)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for i, dilation in enumerate(dilations):
                # groups == channels -> depthwise conv; padding == dilation keeps spatial size.
                self.blocks.add(conv3x3_block(
                    in_channels=mid_channels,
                    out_channels=mid_channels,
                    groups=mid_channels,
                    dilation=dilation,
                    padding=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
    def hybrid_forward(self, F, x):
        # Split channels into one slice per branch.
        xs = F.split(x, axis=1, num_outputs=len(self.blocks._children))
        ys = []
        for bi, xsi in zip(self.blocks._children.values(), xs):
            if len(ys) == 0:
                ys.append(bi(xsi))
            else:
                # Hierarchical fusion: add the previous branch's output before convolving.
                ys.append(bi(xsi + ys[-1]))
        x = F.concat(*ys, dim=1)
        return x
class FPEUnit(HybridBlock):
    """
    FPENet unit: residual block of 1x1 expansion conv -> FPE block -> 1x1 projection conv, with an
    optional SE module and a projection shortcut when shape changes.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int
        Bottleneck factor.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor,
                 use_se,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(FPEUnit, self).__init__(**kwargs)
        # A projection shortcut is needed when the residual branch changes the
        # channel count or the spatial resolution.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        self.use_se = use_se
        mid1_channels = in_channels * bottleneck_factor
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid1_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Fix: forward the batch-norm options to the inner FPE block. Previously
            # they were dropped here (alone among this unit's layers), so the block
            # silently ignored `bn_use_global_stats`/`bn_cudnn_off`.
            self.block = FPEBlock(
                channels=mid1_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2 = conv1x1_block(
                in_channels=mid1_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
            if self.use_se:
                self.se = SEBlock(channels=out_channels)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # Shortcut branch (projected if shapes differ).
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        # Residual branch: expand -> FPE -> project (-> SE).
        x = self.conv1(x)
        x = self.block(x)
        x = self.conv2(x)
        if self.use_se:
            x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class FPEStage(HybridBlock):
    """
    FPENet stage: a (possibly downsampling) FPE unit followed by `layers - 1` residual FPE units
    whose combined output is added back to the first unit's output.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    layers : int
        Number of layers (units) in the stage.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 layers,
                 use_se,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(FPEStage, self).__init__(**kwargs)
        self.use_block = (layers > 1)
        with self.name_scope():
            if self.use_block:
                # Multi-layer stage: first unit downsamples (stride 2, 4x bottleneck),
                # the remaining units keep the resolution.
                self.down = FPEUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=2,
                    bottleneck_factor=4,
                    use_se=use_se,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
                self.blocks = nn.HybridSequential(prefix="")
                for i in range(layers - 1):
                    self.blocks.add(FPEUnit(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        strides=1,
                        bottleneck_factor=1,
                        use_se=use_se,
                        bn_use_global_stats=bn_use_global_stats,
                        bn_cudnn_off=bn_cudnn_off))
            else:
                # Single-layer stage: one non-downsampling unit, no extra blocks.
                self.down = FPEUnit(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=1,
                    bottleneck_factor=1,
                    use_se=use_se,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
    def hybrid_forward(self, F, x):
        x = self.down(x)
        if self.use_block:
            # Stage-level residual: add the stacked units' output to the first unit's output.
            y = self.blocks(x)
            x = x + y
        return x
class MEUBlock(HybridBlock):
    """
    FPENet specific mutual embedding upsample (MEU) block. Fuses a low-resolution
    high-level feature map with a higher-resolution low-level one: each branch is
    modulated by an attention map derived from the other branch before summation.

    Parameters:
    ----------
    in_channels_high : int
        Number of input channels for x_high.
    in_channels_low : int
        Number of input channels for x_low.
    out_channels : int
        Number of output channels.
    out_size : tuple of 2 int
        Spatial size of output image.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels_high,
                 in_channels_low,
                 out_channels,
                 out_size,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(MEUBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Project both branches to a common channel count (no activation).
            self.conv_high = conv1x1_block(
                in_channels=in_channels_high,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
            self.conv_low = conv1x1_block(
                in_channels=in_channels_low,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
            # Channel-attention transform for the high-level branch.
            self.conv_w_high = conv1x1(
                in_channels=out_channels,
                out_channels=out_channels)
            # Spatial-attention transform for the low-level branch (single map).
            self.conv_w_low = conv1x1(
                in_channels=1,
                out_channels=1)
            self.sigmoid = nn.Activation("sigmoid")
            self.relu = nn.Activation("relu")
            self.up = InterpolationBlock(scale_factor=2, out_size=out_size)
    def hybrid_forward(self, F, x_high, x_low):
        x_high = self.conv_high(x_high)
        x_low = self.conv_low(x_low)
        # Channel attention from the high-level branch:
        # global average pool -> 1x1 conv -> ReLU -> sigmoid.
        w_high = F.contrib.AdaptiveAvgPooling2D(x_high, output_size=1)
        w_high = self.conv_w_high(w_high)
        w_high = self.relu(w_high)
        w_high = self.sigmoid(w_high)
        # Spatial attention from the low-level branch:
        # per-pixel channel mean -> 1x1 conv -> sigmoid.
        w_low = x_low.mean(axis=1, keepdims=True)
        w_low = self.conv_w_low(w_low)
        w_low = self.sigmoid(w_low)
        # Mutual modulation: upsampled high features gated by the low branch's
        # spatial map; low features gated by the high branch's channel weights.
        x_high = self.up(x_high)
        x_high = F.broadcast_mul(x_high, w_low)
        x_low = F.broadcast_mul(x_low, w_high)
        out = x_high + x_low
        return out
class FPENet(HybridBlock):
    """
    FPENet model from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1909.08599.

    Parameters:
    ----------
    layers : list of int
        Number of layers for each unit.
    channels : list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    meu_channels : list of int
        Number of output channels for MEU blocks.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default False
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (1024, 2048)
        Spatial size of the expected input image.
    classes : int, default 19
        Number of segmentation classes.
    """
    def __init__(self,
                 layers,
                 channels,
                 init_block_channels,
                 meu_channels,
                 use_se,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 aux=False,
                 fixed_size=False,
                 in_channels=3,
                 in_size=(1024, 2048),
                 classes=19,
                 **kwargs):
        super(FPENet, self).__init__(**kwargs)
        assert (aux is not None)
        assert (fixed_size is not None)
        # Encoder downsamples by 8 overall, so both spatial dims must divide by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        with self.name_scope():
            # Stem: 3x3 conv, stride 2 (input -> 1/2 resolution).
            self.stem = conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            in_channels = init_block_channels
            # Every stage is tagged with do_output, so the encoder returns the
            # list of all three stage outputs (pyramid taps) instead of only
            # the last one.
            self.encoder = MultiOutputSequential(return_last=False)
            for i, (layers_i, out_channels) in enumerate(zip(layers, channels)):
                stage = FPEStage(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    layers=layers_i,
                    use_se=use_se,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off)
                stage.do_output = True
                self.encoder.add(stage)
                in_channels = out_channels
            # Decoder: two MEU fusion steps, each upsampling by 2
            # (1/8 -> 1/4 -> 1/2 of the input resolution).
            self.meu1 = MEUBlock(
                in_channels_high=channels[-1],
                in_channels_low=channels[-2],
                out_channels=meu_channels[0],
                out_size=((in_size[0] // 4, in_size[1] // 4) if fixed_size else None),
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.meu2 = MEUBlock(
                in_channels_high=meu_channels[0],
                in_channels_low=channels[-3],
                out_channels=meu_channels[1],
                out_size=((in_size[0] // 2, in_size[1] // 2) if fixed_size else None),
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            in_channels = meu_channels[1]
            # Per-pixel classifier plus final x2 upsample back to input size.
            self.classifier = conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True)
            self.up = InterpolationBlock(
                scale_factor=2,
                out_size=(in_size if fixed_size else None))
    def hybrid_forward(self, F, x):
        x = self.stem(x)
        # y is the list of the three stage outputs: [stage1, stage2, stage3].
        y = self.encoder(x)
        x = self.meu1(y[2], y[1])
        x = self.meu2(x, y[0])
        x = self.classifier(x)
        x = self.up(x)
        return x
def get_fpenet(model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create FPENet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Architecture hyper-parameters: three stages whose widths double each time.
    base_width = 16
    net = FPENet(
        layers=[1, 3, 9],
        channels=[int(base_width * (2 ** i)) for i in range(3)],
        init_block_channels=base_width,
        meu_channels=[64, 32],
        use_se=False,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(
            filename=weight_file,
            ctx=ctx,
            ignore_extra=True)
    return net
def fpenet_cityscapes(classes=19, **kwargs):
    """
    FPENet model for Cityscapes from 'Feature Pyramid Encoding Network for Real-time Semantic Segmentation,'
    https://arxiv.org/abs/1909.08599.

    Parameters:
    ----------
    classes : int, default 19
        Number of segmentation classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Thin factory: forwards everything to the generic builder under a fixed name.
    return get_fpenet(model_name="fpenet_cityscapes", classes=classes, **kwargs)
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
def _test():
    """Smoke-test: build FPENet, count weights, and check the output shape."""
    import mxnet as mx
    pretrained = False
    fixed_size = True
    in_size = (1024, 2048)
    classes = 19
    ctx = mx.cpu()
    for model in [fpenet_cityscapes]:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size)
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        weight_count = _calc_width(net)
        print("m={}, {}".format(model.__name__, weight_count))
        # Known parameter count for the reference configuration.
        assert (model != fpenet_cityscapes or weight_count == 115125)
        batch = 4
        x = mx.nd.random.normal(shape=(batch, 3) + in_size, ctx=ctx)
        y = net(x)
        # Dense prediction: one score map per class at full input resolution.
        assert (y.shape == (batch, classes) + in_size)


if __name__ == "__main__":
    _test()
| 16,864 | 32.662675 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/irevnet.py | """
i-RevNet for ImageNet-1K, implemented in Gluon.
Original paper: 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.
"""
__all__ = ['IRevNet', 'irevnet301', 'IRevDownscale', 'IRevSplitBlock', 'IRevMergeBlock']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, pre_conv3x3_block, DualPathSequential
class IRevDualPathSequential(DualPathSequential):
    """
    An invertible sequential container for hybrid blocks with dual inputs/outputs.
    Blocks will be executed in the order they are added; `inverse` replays them
    in reverse, skipping the trailing non-invertible ones.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first blocks with single input/output.
    last_ordinals : int, default 0
        Number of the final blocks with single input/output.
    dual_path_scheme : function
        Scheme of dual path response for a block.
    dual_path_scheme_ordinal : function
        Scheme of dual path response for an ordinal block.
    last_noninvertible : int, default 0
        Number of the final blocks skipped during inverse.
    """
    def __init__(self,
                 return_two=True,
                 first_ordinals=0,
                 last_ordinals=0,
                 dual_path_scheme=(lambda block, x1, x2: block(x1, x2)),
                 dual_path_scheme_ordinal=(lambda block, x1, x2: (block(x1), x2)),
                 last_noninvertible=0,
                 **kwargs):
        super(IRevDualPathSequential, self).__init__(
            return_two=return_two,
            first_ordinals=first_ordinals,
            last_ordinals=last_ordinals,
            dual_path_scheme=dual_path_scheme,
            dual_path_scheme_ordinal=dual_path_scheme_ordinal,
            **kwargs)
        self.last_noninvertible = last_noninvertible
    def inverse(self, x1, x2=None):
        # Walk the children in reverse order, calling each block's `inverse`.
        # Since the list is reversed, index i counts from the END of the
        # forward pipeline: the first `last_noninvertible` blocks are skipped,
        # the `last_ordinals` tail and `first_ordinals` head use the ordinal
        # (single-input) scheme, and everything in between uses the dual path.
        length = len(self._children.values())
        for i, block in enumerate(reversed(self._children.values())):
            if i < self.last_noninvertible:
                pass
            elif (i < self.last_ordinals) or (i >= length - self.first_ordinals):
                x1, x2 = self.dual_path_scheme_ordinal(block.inverse, x1, x2)
            else:
                x1, x2 = self.dual_path_scheme(block.inverse, x1, x2)
        if self.return_two:
            return x1, x2
        else:
            return x1
class IRevDownscale(HybridBlock):
    """
    i-RevNet specific downscale (so-called psi-block): an invertible
    space-to-depth rearrangement that trades spatial resolution for channels.
    NOTE(review): both passes read concrete `x.shape`, so this block appears to
    require NDArray inputs (non-hybridized execution) — consistent with
    `net.hybridize()` being commented out in this file's test.

    Parameters:
    ----------
    scale : int
        Scale (downscale) value.
    """
    def __init__(self,
                 scale,
                 **kwargs):
        super(IRevDownscale, self).__init__(**kwargs)
        self.scale = scale
    def hybrid_forward(self, F, x):
        # Space-to-depth: (N, C, H, W) -> (N, C*scale^2, H/scale, W/scale).
        batch, x_channels, x_height, x_width = x.shape
        y_channels = x_channels * self.scale * self.scale
        assert (x_height % self.scale == 0)
        y_height = x_height // self.scale
        # Move channels last, fold `scale` rows of width into the channel dim,
        # then restore NCHW layout.
        y = x.transpose(axes=(0, 2, 3, 1))
        d2_split_seq = y.split(axis=2, num_outputs=(y.shape[2] // self.scale))
        d2_split_seq = [t.reshape(batch, y_height, y_channels) for t in d2_split_seq]
        y = F.stack(*d2_split_seq, axis=1)
        y = y.transpose(axes=(0, 3, 2, 1))
        return y
    def inverse(self, y):
        # Depth-to-space: exact inverse of hybrid_forward (NDArray only).
        import mxnet.ndarray as F
        scale_sqr = self.scale * self.scale
        batch, y_channels, y_height, y_width = y.shape
        assert (y_channels % scale_sqr == 0)
        x_channels = y_channels // scale_sqr
        x_height = y_height * self.scale
        x_width = y_width * self.scale
        # Unfold the channel dimension back into spatial positions.
        x = y.transpose(axes=(0, 2, 3, 1))
        x = x.reshape(batch, y_height, y_width, scale_sqr, x_channels)
        d3_split_seq = x.split(axis=3, num_outputs=(x.shape[3] // self.scale))
        d3_split_seq = [t.reshape(batch, y_height, x_width, x_channels) for t in d3_split_seq]
        x = F.stack(*d3_split_seq, axis=0)
        x = x.swapaxes(0, 1).transpose(axes=(0, 2, 1, 3, 4)).reshape(batch, x_height, x_width, x_channels)
        x = x.transpose(axes=(0, 3, 1, 2))
        return x
class IRevInjectivePad(HybridBlock):
    """
    i-RevNet channel zero padding block: appends `padding` zero channels on the
    forward pass; `inverse` slices them back off.

    Parameters:
    ----------
    padding : int
        Size of the padding.
    """
    def __init__(self,
                 padding,
                 **kwargs):
        super(IRevInjectivePad, self).__init__(**kwargs)
        self.padding = padding

    def hybrid_forward(self, F, x):
        # F.pad only pads the trailing axes of a 4D tensor, so swap the channel
        # axis into position 2, pad there, and swap it back.
        y = x.transpose(axes=(0, 2, 1, 3))
        y = F.pad(y, mode="constant", pad_width=(0, 0, 0, 0, 0, self.padding, 0, 0), constant_value=0)
        return y.transpose(axes=(0, 2, 1, 3))

    def inverse(self, x):
        # Drop the zero channels that the forward pass appended.
        return x[:, :x.shape[1] - self.padding, :, :]
class IRevSplitBlock(HybridBlock):
    """
    iRevNet split block: halves the input along the channel axis; `inverse`
    concatenates the two halves back together.
    """
    def __init__(self, **kwargs):
        super(IRevSplitBlock, self).__init__(**kwargs)

    def hybrid_forward(self, F, x, _):
        halves = F.split(x, axis=1, num_outputs=2)
        return halves[0], halves[1]

    def inverse(self, x1, x2):
        import mxnet.ndarray as F
        return F.concat(x1, x2, dim=1), None
class IRevMergeBlock(HybridBlock):
    """
    iRevNet merge block: concatenates the two streams along channels and
    returns the merged tensor twice; `inverse` splits it back into halves.
    """
    def __init__(self, **kwargs):
        super(IRevMergeBlock, self).__init__(**kwargs)

    def hybrid_forward(self, F, x1, x2):
        merged = F.concat(x1, x2, dim=1)
        return merged, merged

    def inverse(self, x, _):
        import mxnet.ndarray as F
        halves = F.split(x, axis=1, num_outputs=2)
        return halves[0], halves[1]
class IRevBottleneck(HybridBlock):
    """
    iRevNet bottleneck block: three 3x3 convolutions with a 4x channel
    reduction in the middle; the first may be pre-activated.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    preactivate : bool
        Whether use pre-activation for the first convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 preactivate,
                 **kwargs):
        super(IRevBottleneck, self).__init__(**kwargs)
        # Bottleneck: intermediate width is a quarter of the output width.
        mid_channels = out_channels // 4
        with self.name_scope():
            if preactivate:
                # BN+ReLU before the conv (standard pre-activation ordering).
                self.conv1 = pre_conv3x3_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            else:
                # First unit of the network: plain conv, no pre-activation.
                self.conv1 = conv3x3(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    strides=strides)
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class IRevUnit(HybridBlock):
    """
    iRevNet unit: an invertible additive-coupling block. The forward pass maps
    (x1, x2) -> (x2, F(x2) + psi(x1)) so that `inverse` can reconstruct the
    inputs exactly from the outputs.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    preactivate : bool
        Whether use pre-activation for the first convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 preactivate,
                 **kwargs):
        super(IRevUnit, self).__init__(**kwargs)
        if not preactivate:
            # The very first unit receives the full (unsplit) channel count.
            in_channels = in_channels // 2
        # Zero-channel padding keeps the transform injective when channel
        # counts grow without a spatial downscale.
        padding = 2 * (out_channels - in_channels)
        self.do_padding = (padding != 0) and (strides == 1)
        self.do_downscale = (strides != 1)
        with self.name_scope():
            if self.do_padding:
                self.pad = IRevInjectivePad(padding)
            self.bottleneck = IRevBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                preactivate=preactivate)
            if self.do_downscale:
                # Invertible space-to-depth applied to both streams.
                self.psi = IRevDownscale(strides)
    def hybrid_forward(self, F, x1, x2):
        if self.do_padding:
            # Pad the concatenated streams, then re-split so both halves grow.
            x = F.concat(x1, x2, dim=1)
            x = self.pad(x)
            x1, x2 = F.split(x, axis=1, num_outputs=2)
        fx2 = self.bottleneck(x2)
        if self.do_downscale:
            x1 = self.psi(x1)
            x2 = self.psi(x2)
        # Additive coupling; note the output pair is swapped: (x2, y1).
        y1 = fx2 + x1
        return x2, y1
    def inverse(self, x2, y1):
        import mxnet.ndarray as F
        # Exact inversion: recompute F(x2) and subtract it from y1.
        if self.do_downscale:
            x2 = self.psi.inverse(x2)
        fx2 = - self.bottleneck(x2)
        x1 = fx2 + y1
        if self.do_downscale:
            x1 = self.psi.inverse(x1)
        if self.do_padding:
            # Undo the channel zero-padding applied on the forward pass.
            x = F.concat(x1, x2, dim=1)
            x = self.pad.inverse(x)
            x1, x2 = F.split(x, axis=1, num_outputs=2)
        return x1, x2
class IRevPostActivation(HybridBlock):
    """
    iRevNet specific post-activation block: batch normalization followed by ReLU.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats=False,
                 **kwargs):
        super(IRevPostActivation, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        return self.activ(self.bn(x))
class IRevNet(HybridBlock):
    """
    i-RevNet model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(IRevNet, self).__init__(**kwargs)
        assert (in_channels > 0)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            # The last two feature blocks (post-activation + avg-pool) are not
            # invertible, so `inverse` skips them (last_noninvertible=2).
            self.features = IRevDualPathSequential(
                first_ordinals=1,
                last_ordinals=2,
                last_noninvertible=2)
            # Invertible space-to-depth stem (no learnable parameters).
            self.features.add(IRevDownscale(scale=2))
            in_channels = init_block_channels
            self.features.add(IRevSplitBlock())
            for i, channels_per_stage in enumerate(channels):
                stage = IRevDualPathSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        strides = 2 if (j == 0) else 1
                        # Only the network's very first unit skips pre-activation.
                        preactivate = not ((i == 0) and (j == 0))
                        stage.add(IRevUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            preactivate=preactivate))
                        in_channels = out_channels
                self.features.add(stage)
            in_channels = final_block_channels
            self.features.add(IRevMergeBlock())
            self.features.add(IRevPostActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x, return_out_bij=False):
        # out_bij is the pre-pooling bijective representation; it is what
        # `inverse` can map back to the input image.
        x, out_bij = self.features(x)
        x = self.output(x)
        if return_out_bij:
            return x, out_bij
        else:
            return x
    def inverse(self, out_bij):
        # Reconstruct the input from the bijective representation.
        x, _ = self.features.inverse(out_bij)
        return x
def get_irevnet(blocks,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create i-RevNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if blocks != 301:
        raise ValueError("Unsupported i-RevNet with number of blocks: {}".format(blocks))
    layers = [6, 16, 72, 6]
    # Sanity-check the depth formula: 3 convs per unit plus the stem.
    assert (sum(layers) * 3 + 1 == blocks)
    stage_widths = [24, 96, 384, 1536]
    channels = [[width] * depth for width, depth in zip(stage_widths, layers)]
    net = IRevNet(
        channels=channels,
        init_block_channels=12,
        final_block_channels=3072,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(
            filename=weight_file,
            ignore_extra=True,
            ctx=ctx)
    return net
def irevnet301(**kwargs):
    """
    i-RevNet-301 model from 'i-RevNet: Deep Invertible Networks,' https://arxiv.org/abs/1802.07088.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Thin factory: forwards everything to the generic builder under a fixed name.
    return get_irevnet(model_name="irevnet301", blocks=301, **kwargs)
def _test():
    """Smoke-test: build i-RevNet, check weight count, output shape, and that
    the inverse pass reconstructs the input from the bijective features."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        irevnet301,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Hybridization is intentionally disabled: IRevDownscale reads concrete
        # tensor shapes, which is not available in symbolic mode.
        # net.hybridize()
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != irevnet301 or weight_count == 125120356)
        x = mx.nd.random.randn(2, 3, 224, 224, ctx=ctx)
        y = net(x)
        assert (y.shape == (2, 1000))
        # Invertibility check: reconstruct x from the bijective representation.
        y, out_bij = net(x, True)
        x_ = net.inverse(out_bij)
        assert (x_.shape == (2, 3, 224, 224))
        # Tolerate numerical blow-up for the untrained net; otherwise require
        # a close reconstruction.
        assert ((np.max(np.abs(x.asnumpy() - x_.asnumpy())) < 1e-4) or (np.max(np.abs(y.asnumpy()) > 1e10)))
if __name__ == "__main__":
    _test()
| 17,306 | 31.59322 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/model_store.py | """
Model store which provides pretrained models.
"""
__all__ = ['get_model_file']
import os
import zipfile
import logging
from mxnet.gluon.utils import download, check_sha1
_model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [
('alexnet', '1610', '4dd7cfb6275229c3b3889ec7ad29f8d48a499193', 'v0.0.481'),
('alexnetb', '1705', '0181007ac2ba5d5c051346af1eea4a00c36f34e9', 'v0.0.485'),
('zfnet', '1678', '3299fdce9712697f9e03e1a0cf741745553a28cf', 'v0.0.395'),
('zfnetb', '1459', '7a654810eced689323e414030165cf1392c9684f', 'v0.0.400'),
('vgg11', '1016', '3d78e0ec95d358577acf8b2e2f768a72ec319ee3', 'v0.0.381'),
('vgg13', '0950', 'd2bcaaf3704afb47b2818066255b3de65d0c03e1', 'v0.0.388'),
('vgg16', '0832', '22fe503aa438ebfd1f3128b676db21a06de3cd59', 'v0.0.401'),
('vgg19', '0767', 'e198aa1f6519b3331e5ebbe782ca35fd2aca6213', 'v0.0.420'),
('bn_vgg11', '0934', '3f79cab1e9e0bab0d57c0f80a6c9782e5cdc765d', 'v0.0.339'),
('bn_vgg13', '0887', '540243b0a3eb2ae3974c2d75cf82e655fa4f0dda', 'v0.0.353'),
('bn_vgg16', '0757', '90441925c7c19c9db6bf96824c2440774c0e54df', 'v0.0.359'),
('bn_vgg19', '0689', 'cd8f4229e5e757ba951afc6eb1a4ec7a471022f7', 'v0.0.360'),
('bn_vgg11b', '0975', '685ae89dcf3916b9c6e887d4c5bc441a34253744', 'v0.0.407'),
('bn_vgg13b', '0912', 'fc678318391d45465bda4aa17744dc0d148135e3', 'v0.0.488'),
('bn_vgg16b', '0775', '77dad99b4f72176870655ffbf59e96f3b9570a0b', 'v0.0.489'),
('bn_vgg19b', '0735', '8d9a132bfd9230304e588ec159170ee3173d64ba', 'v0.0.490'),
('bninception', '0754', '75225419bae387931f6c8ccc19dc48fb1cc8cdae', 'v0.0.405'),
('resnet10', '1220', '83ebb0937d85142c3f827be56f08a57a5996c90b', 'v0.0.569'),
('resnet12', '1203', 'a41948541130b9ebf8285412b38c668198e4ad4f', 'v0.0.485'),
('resnet14', '1086', '5d9f22a7aa6c821eb69fc043af2da040f36110eb', 'v0.0.491'),
('resnetbc14b', '1033', '4ff348bebc7696cd219be605668bb76546dd9641', 'v0.0.481'),
('resnet16', '0978', '2c28373c9ce1f50ff6a4b47a16a3d1bc2e557c75', 'v0.0.493'),
('resnet18_wd4', '1740', 'a74ea15d056a84133e02706a6c7f8ed8f50c8462', 'v0.0.262'),
('resnet18_wd2', '1284', '9a5154065311c8ffbbc57b20a443b205c7a910fa', 'v0.0.263'),
('resnet18_w3d4', '1066', '1a574a4198a5bbf01572c2b3f091eb824ff8196e', 'v0.0.266'),
('resnet18', '0867', '711ed8ab624c4c580e90321989aef6174ad2991d', 'v0.0.478'),
('resnet26', '0823', 'a2746eb21d73c3c85edacabe36d680988986f890', 'v0.0.489'),
('resnetbc26b', '0758', '2b5e8d0888936a340ea13c7e8ba30b237cd62f1c', 'v0.0.313'),
('resnet34', '0743', '5cdeeccda6f87fe13aed279213006061a8b42037', 'v0.0.291'),
('resnetbc38b', '0672', '820944641ba54f4aaa43d2a305ab52b9dcb740c7', 'v0.0.328'),
('resnet50', '0604', 'a71d1d2a8e8e4259742bbd67c386623233b57c6c', 'v0.0.329'),
('resnet50b', '0611', 'ca12f8d804000bf5202e2e3838dec7ef6b772149', 'v0.0.308'),
('resnet101', '0516', '75a3105c609aedb89d63dd3dc47b578f41fff6be', 'v0.0.499'),
('resnet101b', '0512', 'af5c4233b28c8b7acd3b3ebe02f9f2eda2c77824', 'v0.0.357'),
('resnet152', '0444', '15f2dd80f964225e685e7564cecd04911feb97ee', 'v0.0.518'),
('resnet152b', '0429', '45a3ad0f989852a4456c907fa3b40b00dd24937c', 'v0.0.517'),
('preresnet10', '1401', '2b96c0818dbabc422e98d8fbfc9b684c023922ed', 'v0.0.249'),
('preresnet12', '1321', 'b628efb5415784075e18b6734b1ba1e5c7280dee', 'v0.0.257'),
('preresnet14', '1218', 'd65fa6287414d9412e34ac0df6921eaa5646a2b6', 'v0.0.260'),
('preresnetbc14b', '1151', 'c712a235b75ad4956411bab265dfd924c748726e', 'v0.0.315'),
('preresnet16', '1081', '5b00b55f74adb9ee4a6ba5f946aafd48b4d8aa47', 'v0.0.261'),
('preresnet18_wd4', '1778', '3d949d1ae20b9188a423b56a1f7a89b4bcecc3d2', 'v0.0.272'),
('preresnet18_wd2', '1319', '63e55c24bc0ae93a8f8daefa4b35dc3e70147f65', 'v0.0.273'),
('preresnet18_w3d4', '1068', 'eb5698616757fd0947851f62c33fc4d7b4a5f23a', 'v0.0.274'),
('preresnet18', '0951', '71279a0b7339f1efd12bed737219a9ed76175a9d', 'v0.0.140'),
('preresnet26', '0834', 'c2ecba0948934c28d459b7f87fbc1489420fd4fb', 'v0.0.316'),
('preresnetbc26b', '0786', '265f591f320db0915c18c16f4ed0e2e53ee46567', 'v0.0.325'),
('preresnet34', '0751', 'ba9c829e72d54f8b02cf32ea202c195d36568467', 'v0.0.300'),
('preresnetbc38b', '0633', '809d2defea82276fdd7ff2faafc1f6ffe57c93b5', 'v0.0.348'),
('preresnet50', '0620', '50f13b2d3fd197c8aa721745adaf2d6615fd8c16', 'v0.0.330'),
('preresnet50b', '0632', '951de2dc558f94f489ce62fedf979ccc08361641', 'v0.0.307'),
('preresnet101', '0534', 'ea9a1724662744fe349c0aa8dd0ec42708fe88c7', 'v0.0.504'),
('preresnet101b', '0540', '3839a4733a8a614bb6b7b4f555759bb5b8013d42', 'v0.0.351'),
('preresnet152', '0446', '3b41bd93a7a2807e8cc88ae75ec0cde69af1e786', 'v0.0.510'),
('preresnet152b', '0438', 'ebf71d023e88d006c55946e3c6cf38c5d0cffeb2', 'v0.0.523'),
('preresnet200b', '0446', '70135e6618bd8e870574a2667d3ec592238b3a3c', 'v0.0.529'),
('preresnet269b', '0501', '6e56119dc52a38af690bb41d4ae1683007558233', 'v0.0.545'),
('resnext14_16x4d', '1223', '1f8072e8d01d1427941a06bbca896211e98e2b75', 'v0.0.370'),
('resnext14_32x2d', '1247', '2ca8cc2544045c21d0a7ca740483097491ba0855', 'v0.0.371'),
('resnext14_32x4d', '1110', '9be6190e328c15a06703be3ba922d707c2f4d8e7', 'v0.0.327'),
('resnext26_32x2d', '0850', 'a1fb4451be6336d9f648ccc2c2dedacc5704904a', 'v0.0.373'),
('resnext26_32x4d', '0721', '5264d7efd606e1c95a2480050e9f03a7a2f02b09', 'v0.0.332'),
('resnext50_32x4d', '0545', '748e53b31fe716b5fdd059107d22bdfec3a31839', 'v0.0.498'),
('resnext101_32x4d', '0418', 'c07faa66647bd25dd222b44d8782c59fcc2e3708', 'v0.0.530'),
('resnext101_64x4d', '0439', '178e79057f27afec04219aa1a1a96abbbd8763a9', 'v0.0.544'),
('seresnet10', '1169', '675d4b5bbaaab1b87988df96819cd19e5323512c', 'v0.0.486'),
('seresnet12', '1176', 'df10dd88759750bb042ce7b09dd8a78f32d78e12', 'v0.0.544'),
('seresnet14', '1095', '579e2fecfe37d1c9422f6521fef610bfde6fcaaa', 'v0.0.545'),
('seresnet16', '0972', 'ef50864856c2120082e3db7d4193f31ec77624c6', 'v0.0.545'),
('seresnet18', '0920', '85a6b1da19645419cc3075852588cc7e7da5715f', 'v0.0.355'),
('seresnet26', '0803', '9f9004192240ae0125399d2f6acbb5359027039d', 'v0.0.363'),
('seresnetbc26b', '0682', '15ae6e19b1626107029df18bf3f5140e6fcb2b02', 'v0.0.366'),
('seresnetbc38b', '0575', 'f80f0c3c2612e1334204df1575b8a7cd18f851ff', 'v0.0.374'),
('seresnet50', '0560', 'e75ef498abfc021f356378b2c806de8927287fe7', 'v0.0.441'),
('seresnet50b', '0533', '0d8f0d23bb980621095e41de69e3a68d7aaeba45', 'v0.0.387'),
('seresnet101', '0441', 'f9cb9df623683520522c5fe8577e8a93cf253f46', 'v0.0.533'),
('seresnet101b', '0462', '59fae71a5db6ad1f1b5a79f7339920aa1191ed70', 'v0.0.460'),
('seresnet152', '0430', 'c712f55410938861c32740fe93770b0d54e094a3', 'v0.0.538'),
('sepreresnet10', '1221', 'c3ab94ee5c938f53a8a61849be3a463458444a1f', 'v0.0.544'),
('sepreresnet12', '1180', 'd6b7260b4931e8806368c42b8bb605b9a8afaa23', 'v0.0.543'),
('sepreresnet16', '0956', 'b5e4e06f6f5394f7557986193700a8ecf23ffdc7', 'v0.0.543'),
('sepreresnet18', '0881', 'b018c6e38601b0719445e302ea64196924319814', 'v0.0.543'),
('sepreresnet26', '0804', 'a2e48457a44958ed7f20940997f419080b1d14e8', 'v0.0.543'),
('sepreresnetbc26b', '0636', '33c94c9dd2986d0643f7c432d5203294ecf2124e', 'v0.0.399'),
('sepreresnetbc38b', '0563', 'd8f0fbd35b840743e312b9ec944c7fc141824ace', 'v0.0.409'),
('sepreresnet50b', '0532', '5b620ff7175c674ef41647e2e79992dceefeec19', 'v0.0.461'),
('seresnext50_32x4d', '0433', 'd74d1d0a5bd1449562e9f1de92b1389c647fdd13', 'v0.0.505'),
('seresnext101_32x4d', '0444', '3c89aec5b98c1bfca13bc8c21e07dba82243c012', 'v0.0.529'),
('seresnext101_64x4d', '0408', '2e039a419e4eb84ce93d1515027099c4dc13f5a1', 'v0.0.561'),
('senet16', '0806', 'ba26802160725af926e5a3217f8848bd3a6599fd', 'v0.0.341'),
('senet28', '0591', 'd5297a35a2e56ecc892499c1bd4373125a40b783', 'v0.0.356'),
('senet154', '0440', 'c0e2d2b9e70894e28e4f957296229bdd89b70607', 'v0.0.522'),
('resnestabc14', '0634', '4b0cbe8c59e9d764027f51670096037e2f058970', 'v0.0.493'),
('resnesta18', '0689', '8f37b6927751b27a72c1b8c625de265da9b31570', 'v0.0.489'),
('resnestabc26', '0470', 'f88d49d7bd6cdf1f448afdf4bc5dadd9921798dd', 'v0.0.495'),
('resnesta50', '0438', '6ce54a97d6e6fe8aa66755669b8f47a0790bfeb6', 'v0.0.531'),
('resnesta101', '0399', 'ab6c6f89407d7b2cf89406f7bb62a48e86d7cabc', 'v0.0.465'),
('resnesta152', '0451', '532c448de0011e90924b3c1e829f9525c6a5420e', 'v0.0.540'),
('resnesta200', '0340', '3bd1f0c8d9a2862b89590c1c490a38d6d982522c', 'v0.0.465'),
('resnesta269', '0336', '8333862a0c016432f6cd5b45f7e9de7ee6e3f319', 'v0.0.465'),
('ibn_resnet50', '0559', '0f75710a144ea1483e6235f6909f8b8c3555ec01', 'v0.0.495'),
('ibn_resnet101', '0489', 'db938be9060c0ea48913fa0fa9794bf6bcacea7f', 'v0.0.552'),
('ibnb_resnet50', '0579', '146aa6e143d582cd33af23c942e3e0b2fe152817', 'v0.0.552'),
('ibn_resnext101_32x4d', '0487', '1398597e25f0be2d7b05f190c5c53f7b8d5265e6', 'v0.0.553'),
('ibn_densenet121', '0646', '82ee3ff44fd308107ec07d759c52a99e94af7041', 'v0.0.493'),
('ibn_densenet169', '0608', 'b509f33947b2f205f03e49ce2d5ee87855bbf708', 'v0.0.500'),
('airnet50_1x64d_r2', '0523', '1f982e0e8b3af40a5d67ca854f18ca837aa332f4', 'v0.0.522'),
('airnet50_1x64d_r16', '0544', '09a9f13b9d5f3e27eb26271070cc4bf8dd72cf9f', 'v0.0.519'),
('airnext50_32x4d_r2', '0504', '664dd077a978562a376e8ab70b5d740893873111', 'v0.0.521'),
('bam_resnet50', '0538', 'fa612c3da552cd26094634c359413db69d0ef97b', 'v0.0.499'),
('cbam_resnet50', '0488', 'ebd4af235d763f4ecce6dd4e09b508ba61b561f7', 'v0.0.537'),
('scnet50', '0511', '359d35d017e838d3c3c2bdf1cc83074610dc3c5b', 'v0.0.493'),
('scnet101', '0446', 'bc5cade042e78813db4ce5878d733daa0ce7f89d', 'v0.0.507'),
('scneta50', '0459', 'c1f6f0286ed942bbc468395b82b7be84312f3c73', 'v0.0.509'),
('regnetx002', '1038', '7800b310f45b4666ef6c862bc8a2573f65ddaa40', 'v0.0.475'),
('regnetx004', '0855', 'b933a72fe7304a31391a9e9fcdf7ffc47ea05353', 'v0.0.479'),
('regnetx006', '0756', 'd41aa087bd288266105142b88b03b67d74352a46', 'v0.0.482'),
('regnetx008', '0724', '79309908dee31eda64e824a9b3bd33c0afaaf5a8', 'v0.0.482'),
('regnetx016', '0613', '018dbe2d89d2bac26f41d98b09ba57b049ed4cfe', 'v0.0.486'),
('regnetx032', '0568', '6d4372fcf0b9d6e3edefa99e72dd411b6a0c676c', 'v0.0.492'),
('regnetx040', '0469', 'c22092c7ceeac713a9102c697f23bec0b7cfaec0', 'v0.0.495'),
('regnetx064', '0458', '473342937e02dce6244a62780f98aed1f5eb831d', 'v0.0.535'),
('regnetx080', '0466', '4086910ad9df1693db658c86f874f686a279175c', 'v0.0.515'),
('regnetx120', '0518', 'ea20b368214230ce65c14218aedba07b68235e38', 'v0.0.542'),
('regnetx160', '0456', '941840a38b20e5b5c6ca092bed3f2ea0ff602895', 'v0.0.532'),
('regnetx320', '0394', 'a120c8d2d181dacffde8a3029292ee59da12e9be', 'v0.0.548'),
('regnety002', '0953', 'b37fcac05e59ba5e751701da8e81195bbfbf3db8', 'v0.0.476'),
('regnety004', '0747', '5626bdf45eaf0f23ddc6e1b68f6ad2db7ca119cf', 'v0.0.481'),
('regnety006', '0697', '81372679b5f9601a5bc72caa13aff378d2ac4233', 'v0.0.483'),
('regnety008', '0645', 'd92881be768a267f1cd2b540d087884bbe93f644', 'v0.0.483'),
('regnety016', '0568', 'c4541a25e92ebf31d9bddb4975738c08fe929836', 'v0.0.486'),
('regnety032', '0413', '9066698526cbc930706fe00b21ca56c31ad7e2e4', 'v0.0.473'),
('regnety040', '0467', '6039a215ee79a7855156c5166df4c6b34f8d501d', 'v0.0.494'),
('regnety064', '0445', '8ae16f971eacad369df091e68c26e63b8c6da9a5', 'v0.0.513'),
('regnety080', '0436', '4cd7fc61d54ec63bfeff11246e8258188351e7ed', 'v0.0.516'),
('regnety120', '0431', 'effa35b601ffbdced20342d9bfe3917a8279880a', 'v0.0.526'),
('regnety160', '0430', '8273156944dea5a439ef55e7ed84a10524be090c', 'v0.0.527'),
('regnety320', '0373', 'd26a5f80c1d67e3527ddbdce75683370c5799e06', 'v0.0.550'),
('pyramidnet101_a360', '0520', '3a98a2bfc4e74b00b5fff20783613281121a6d37', 'v0.0.507'),
('diracnet18v2', '1117', '27601f6fa54e3b10d77981f30650d7a9d4bce91e', 'v0.0.111'),
('diracnet34v2', '0946', '1faa6f1245e152d1a3e12de4b5dc1ba554bc3bb8', 'v0.0.111'),
('crunet56', '0536', 'd7112ed431630f4162fb2ebe7a7c0666fa1718a8', 'v0.0.558'),
('densenet121', '0685', 'd3a1fae8b311343498f736e494d60d32e35debfb', 'v0.0.314'),
('densenet161', '0592', '29897d410ea5ae427278df060de578911df74667', 'v0.0.432'),
('densenet169', '0605', '9c045c864828e773f92f998199821fc0c21e0eb4', 'v0.0.406'),
('densenet201', '0590', '89aa8c295b21fdd682df0027d2232e6dabf2cace', 'v0.0.426'),
('condensenet74_c4_g4', '0864', 'cde68fa2fcc9197e336717a17753a15a6efd7596', 'v0.0.4'),
('condensenet74_c8_g8', '1049', '4cf4a08e7fb46f5821049dcae97ae442b0ceb546', 'v0.0.4'),
('peleenet', '0979', '758d3cf992a6c92731069be091be2e8ebd3209e2', 'v0.0.496'),
('wrn50_2', '0606', '5ff344cae22c8ce3f21e042be5a5d899681d89a0', 'v0.0.520'),
('drnc26', '0711', 'e69a4e7bc94aab58e05880c886e8e829584845c3', 'v0.0.508'),
('drnc42', '0614', '6ee5008a3e66476380f4fadf3cf2633c32a596c9', 'v0.0.556'),
('drnc58', '0515', '418d18b26f32347e695b85a60d2bf658409be28d', 'v0.0.559'),
('drnd22', '0744', 'aecf15fc2171d29d8ea765afa540e4ddb40a9b19', 'v0.0.498'),
('drnd38', '0624', '4a86d6ebd970e02792068c46c80ae00686d759cc', 'v0.0.552'),
('drnd54', '0497', '117ca29b47a6e640269d455dfcc12e9958f5f1e1', 'v0.0.554'),
('drnd105', '0487', '52286b345e48cc873bbc0239cdd91b03beae655d', 'v0.0.564'),
('dpn68', '0658', '07251919c08640c94375670cbc5f0fbc312ed59b', 'v0.0.310'),
('dpn98', '0422', '1ace57dc702d5b6ca7eccb657036f682605d7efc', 'v0.0.540'),
('dpn131', '0477', 'e814a53cc6cddc0b6f2a11393a88c44723a8ec0c', 'v0.0.534'),
('darknet_tiny', '1746', '16501793621fbcb137f2dfb901760c1f621fa5ec', 'v0.0.69'),
('darknet_ref', '1668', '3011b4e14b629f80da54ab57bef305d588f748ab', 'v0.0.64'),
('darknet53', '0550', '1d0b4f620071d544a3a1ccbe437267b8925ecc63', 'v0.0.501'),
('irevnet301', '0735', '4378cc1061da0377a3c7fc00c6be49c3875c997a', 'v0.0.564'),
('bagnet9', '2536', '6b5d69dac8fd780e9ac26b2b964370445662c610', 'v0.0.553'),
('bagnet17', '1523', 'cc7b9a74ec742972fc289686bdaf5b1f5731121f', 'v0.0.558'),
('bagnet33', '1041', '3b20fa3a31aa08f216d7091749f59154b6357397', 'v0.0.561'),
('dla34', '0705', '557c5f4f1db66481af6101c75d9ba52b486eda25', 'v0.0.486'),
('dla46c', '1286', '5b38b67fecf2d701b736eb23e1301b6dd7eb5fb9', 'v0.0.282'),
('dla46xc', '1225', 'e570f5f00a098b0de34e657f9d8caeda524d39f3', 'v0.0.293'),
('dla60', '0554', '88b141c4ca81598dbe4333bd4ddd5a1554772348', 'v0.0.494'),
('dla60x', '0553', '58924af84faf6ac0d980b047ccb505619925d97d', 'v0.0.493'),
('dla60xc', '1074', '1b4e4048847e1ba060eb76538ee09e760f40be11', 'v0.0.289'),
('dla102', '0517', '0e5d954cee3bb260a679425524deab61871bd0bf', 'v0.0.505'),
('dla102x', '0470', 'ea82787ce2b3c79cff32ccf54461507cc09c58ce', 'v0.0.528'),
('dla102x2', '0423', 'dde259b3525fb6dae7e445f6e534a13159b3b365', 'v0.0.542'),
('dla169', '0460', '71971da4e881084851c9b581e719ce4edfb8292d', 'v0.0.539'),
('fishnet150', '0466', 'ed21862d14bb0a4e9d1723b7c639e455a7b141c0', 'v0.0.502'),
('espnetv2_wd2', '1971', '2821c339eb68154129e2864f1b5d74d876a08d77', 'v0.0.567'),
('espnetv2_w1', '1383', '3e031b749057f0237b3ebd7954b175b4bb13200f', 'v0.0.569'),
('espnetv2_w5d4', '1226', '2a498c6d631c54dfacdd5cf9b4c6a07a6db82b2d', 'v0.0.564'),
('espnetv2_w3d2', '1086', 'e869dabdd427a4ab38016d375d5e1b2f29758cde', 'v0.0.566'),
('espnetv2_w2', '0941', 'ef6b1cc026609da7a8df81360e36ecb04985ad28', 'v0.0.566'),
('dicenet_wd5', '3051', '46077eac47e2a299dc1d4edc0128bd3cbd8e9ea9', 'v0.0.563'),
('dicenet_wd2', '2308', 'c03d6d00b8f9ecc8ad87c1ee4bd82a0291a0af06', 'v0.0.561'),
('dicenet_w3d4', '1618', 'eefcffc69e24e6ba0df06db03d0fef8be6afdfe1', 'v0.0.567'),
('dicenet_w1', '1411', '93e2e0df3b9cb62c8ba08d3f3d61237e20fce9e2', 'v0.0.513'),
('dicenet_w5d4', '1251', '710737608fb6e2282cdd3903d4a703eb1989b594', 'v0.0.515'),
('dicenet_w3d2', '1144', 'bb2a902d7adbe96472157e131153f48dfea9f37f', 'v0.0.522'),
('dicenet_w7d8', '1081', '37b9276b8b97c87554652d6963a566cbd189b054', 'v0.0.527'),
('dicenet_w2', '0915', 'c301319d2d2867f155227389ea7402106858c6d4', 'v0.0.569'),
('hrnet_w18_small_v1', '0873', '1060c1c562770adb94115de1cad797684dbb5703', 'v0.0.492'),
('hrnet_w18_small_v2', '0602', '1f319e1b1990818754b97104467cfecf3401ae4e', 'v0.0.499'),
('hrnetv2_w18', '0500', '0a70090294c726c1f303e9114d23c9373695eb44', 'v0.0.508'),
('hrnetv2_w30', '0508', 'eb6afc492537295615dc465236934a9a970c1bef', 'v0.0.525'),
('hrnetv2_w32', '0496', '9c6c47dcce100ff006509ed559158862a53436a6', 'v0.0.528'),
('hrnetv2_w40', '0481', '2382694cceff60b32b4702e64945706f183e3e1c', 'v0.0.534'),
('hrnetv2_w44', '0486', 'f5882024ac9513c2d6c2b41bc774862e392f69ad', 'v0.0.541'),
('hrnetv2_w48', '0484', '936bb78f8c187ca3cc96c7564fa3ff8ad9cb68a6', 'v0.0.541'),
('hrnetv2_w64', '0478', '4c18c51411db532c3b8168f0a1ae3e461e703d68', 'v0.0.543'),
('vovnet27s', '0980', '2a44d455a86e740bd1be61d204553af213ca3ac6', 'v0.0.551'),
('vovnet39', '0548', '20b60ee6bb8c59c684f8ffed7fbda3d76b1a7280', 'v0.0.493'),
('vovnet57', '0510', 'ed3cad779b94c4f4f531d6c37f90f1a5209d3b1c', 'v0.0.505'),
('selecsls42b', '0596', 'f5a35c74880fbe94fbe3770a96968fc81186b42b', 'v0.0.493'),
('selecsls60', '0511', '960edec5159b56bdba4ce606df203c8ce14cb8ba', 'v0.0.496'),
('selecsls60b', '0537', '7f83801b1c158502d93a4523bdecda43017448d5', 'v0.0.495'),
('hardnet39ds', '0864', '72e8423ee0b496c10b48ae687a417385d9667394', 'v0.0.485'),
('hardnet68ds', '0738', '012bf3ac31f38c78d5cdef1367cc3c27447236af', 'v0.0.487'),
('hardnet68', '0702', '6667306df893290d38510a76fd42ec82e05e2f68', 'v0.0.557'),
('hardnet85', '0572', '3baa0a7dd204196fd1afa631cf6de082a5cc0a36', 'v0.0.495'),
('squeezenet_v1_0', '1734', 'e6f8b0e8253cef1c5c071dfaf2df5fdfc6a64f8c', 'v0.0.128'),
('squeezenet_v1_1', '1739', 'd7a1483aaa1053c7cd0cf08529b2b87ed2781b35', 'v0.0.88'),
('squeezeresnet_v1_0', '1767', '66474b9b6a771055b28c37b70621c026a1ef6ef4', 'v0.0.178'),
('squeezeresnet_v1_1', '1784', '26064b82773e7a7175d6038976a73abfcd5ed2be', 'v0.0.70'),
('sqnxt23_w1', '1866', '73b700c40de5f7be9d2cf4ed30cc8935c670a3c3', 'v0.0.171'),
('sqnxt23v5_w1', '1743', '7a83722e7d362cef950d8534020f837caf9e6314', 'v0.0.172'),
('sqnxt23_w3d2', '1321', '4d733bcd19f1e502ebc46b52f0b69d959636902e', 'v0.0.210'),
('sqnxt23v5_w3d2', '1268', '4f98bbd3841d8d09a067100841f64ce3eccf184a', 'v0.0.212'),
('sqnxt23_w2', '1063', '95d9b55a5e857298bdb7974db6e3dbd9ecc94401', 'v0.0.240'),
('sqnxt23v5_w2', '1024', '707246f323bc95d0ea2d5608e9e85ae9fe59773a', 'v0.0.216'),
('shufflenet_g1_wd4', '3677', 'ee58f36811d023e1b2e651469c470e588c93f9d3', 'v0.0.134'),
('shufflenet_g3_wd4', '3617', 'bd08e3ed6aff4993cf5363fe8acaf0b22394bea0', 'v0.0.135'),
('shufflenet_g1_wd2', '2238', 'f77dcd18d3b759a3046bd4a2443c40e4ff455313', 'v0.0.174'),
('shufflenet_g3_wd2', '2060', 'ea6737a54bce651a0e8c0b533b982799842cb1c8', 'v0.0.167'),
('shufflenet_g1_w3d4', '1675', '2f1530aa72ee04e3599c5296b590a835d9d50e7f', 'v0.0.218'),
('shufflenet_g3_w3d4', '1609', 'e008e926f370af28e587f349384238d240a0fc02', 'v0.0.219'),
('shufflenet_g1_w1', '1350', '01934ee8f4bf7eaf4e36dd6442debb84ca2a2849', 'v0.0.223'),
('shufflenet_g2_w1', '1332', 'f5a1479fd8523032ee17a4de00fefd33ff4d31e6', 'v0.0.241'),
('shufflenet_g3_w1', '1329', 'ac58d62c5f277c0e9e5a119cc1f48cb1fcfc8306', 'v0.0.244'),
('shufflenet_g4_w1', '1310', '73c039ebf56f9561dd6eecc4cbad1ab1db168ed1', 'v0.0.245'),
('shufflenet_g8_w1', '1319', '9a50ddd9ce67ec697e3ed085d6c39e3d265f5719', 'v0.0.250'),
('shufflenetv2_wd2', '1830', '156953de22d0e749c987da4a58e0e53a5fb18291', 'v0.0.90'),
('shufflenetv2_w1', '1123', '27435039ab7794c86ceab11bd93a19a5ecab78d2', 'v0.0.133'),
('shufflenetv2_w3d2', '0913', 'f132506c9fa5f0eb27398f9936b53423d0cd5b66', 'v0.0.288'),
('shufflenetv2_w2', '0823', '2d67ac62057103fd2ed4790ea0058e0922abdd0f', 'v0.0.301'),
('shufflenetv2b_wd2', '1782', '845a9c43cf4a9873f89c6116634e74329b977e64', 'v0.0.157'),
('shufflenetv2b_w1', '1101', 'f679702f7c626161413320160c6c9c199de9b667', 'v0.0.161'),
('shufflenetv2b_w3d2', '0879', '4022da3a5922127b1acf5327bd9f1d4d55726e05', 'v0.0.203'),
('shufflenetv2b_w2', '0810', '7429df751916bf24bd7fb86bc137ae36275b9d19', 'v0.0.242'),
('menet108_8x1_g3', '2030', 'aa07f925180834389cfd3bf50cb22d2501225118', 'v0.0.89'),
('menet128_8x1_g4', '1913', '0c890a76fb23c0af50fdec076cb16d0f0ee70355', 'v0.0.103'),
('menet160_8x1_g8', '2028', '4f28279a94e631f6a51735de5ea29703cca69845', 'v0.0.154'),
('menet228_12x1_g3', '1289', '2dc2eec7c9ebb41c459450e1843503b5ac7ecb3a', 'v0.0.131'),
('menet256_12x1_g4', '1216', '7caf63d15190648e266a4e7520c3ad677716f388', 'v0.0.152'),
('menet348_12x1_g3', '0936', '62c72b0b56460f062d4da7155bd64a524f42fb88', 'v0.0.173'),
('menet352_12x1_g8', '1167', '5892fea4e44eb27814a9b092a1a06eb81cea7844', 'v0.0.198'),
('menet456_24x1_g3', '0780', '7a89b32c89f878ac63fc96ddc71cb1a5e91c84d6', 'v0.0.237'),
('mobilenet_wd4', '2218', '3185cdd29b3b964ad51fdd7820bd65f091cf281f', 'v0.0.62'),
('mobilenet_wd2', '1330', '94f13ae1375b48892d8ecbb4a253bb583fe27277', 'v0.0.156'),
('mobilenet_w3d4', '1051', '6361d4b4192b5fc68f3409100d825e8edb28876b', 'v0.0.130'),
('mobilenet_w1', '0865', 'eafd91e9369abb09726f2168aba24453b17fc22e', 'v0.0.155'),
('mobilenetb_wd4', '2165', '2070764e0b3be74922eb5fa0a4342c693821ba90', 'v0.0.481'),
('mobilenetb_wd2', '1271', '799ef980b2726a77d4b68d99f520d9d6bc7d86dc', 'v0.0.480'),
('mobilenetb_w3d4', '1020', 'b01c8bacda6f8e26b34a0313a4dc3883511760f7', 'v0.0.481'),
('mobilenetb_w1', '0788', '82664eb4c1f2ddd0ac163f50263237f7667223f3', 'v0.0.489'),
('fdmobilenet_wd4', '3053', 'd4f18e5b4ed63e5426eafbf5db7f8e2a97c28581', 'v0.0.177'),
('fdmobilenet_wd2', '1969', '242b9fa82d54f54f08b4bdbb194b7c89030e7bc4', 'v0.0.83'),
('fdmobilenet_w3d4', '1601', 'cb10c3e129706d3023d752e7402965af08f91ca7', 'v0.0.159'),
('fdmobilenet_w1', '1312', '95fa0092aac013c88243771faf66ef1134b7574d', 'v0.0.162'),
('mobilenetv2_wd4', '2412', 'd92b5b2dbb52e27354ddd673e6fd240a0cf27175', 'v0.0.137'),
('mobilenetv2_wd2', '1442', 'd7c586c716e3ea85e793f7c5aaf9cae2a907117b', 'v0.0.170'),
('mobilenetv2_w3d4', '1044', '768454f4bdaae337c180bb81248b8c5b8d31040b', 'v0.0.230'),
('mobilenetv2_w1', '0864', '6e58b1cb96852e4c6de6fc9cd11241384af21df9', 'v0.0.213'),
('mobilenetv2b_wd4', '2338', '77ba7e8d41542d311e240dab75e4d29fa0677fb9', 'v0.0.483'),
('mobilenetv2b_wd2', '1373', '3bfc8a592a0881c2cb025f52a09fb5057a7896be', 'v0.0.486'),
('mobilenetv2b_w3d4', '1064', '5d4dc4e5622043697382272183a3d0bd43dbc218', 'v0.0.483'),
('mobilenetv2b_w1', '0884', 'ab0ea3993e7c533f0aea5793331dc6302d715e9c', 'v0.0.483'),
('mobilenetv3_large_w1', '0729', 'db741a9938acc8a5fd9544aaf53b41aebb98e021', 'v0.0.491'),
('igcv3_wd4', '2830', '71abf6e0b6bff1d3a3938bfea7c752b59ac05e9d', 'v0.0.142'),
('igcv3_wd2', '1703', '145b7089e1d0e0ce88f17393a357d5bb4ae37734', 'v0.0.132'),
('igcv3_w3d4', '1096', '3c7c86fc43df2e5cf95a451ebe07fccf2d9dc076', 'v0.0.207'),
('igcv3_w1', '0900', 'e2c3da1cffd8e42da7a052b80db2f86758c8d35b', 'v0.0.243'),
('mnasnet_b1', '0723', 'a6f74cf912fa5b1ee4bb9825f3143a0c1ced03be', 'v0.0.493'),
('mnasnet_a1', '0705', '3efe98a3bd6ea0a8cb0902a4dba424c134145d5f', 'v0.0.486'),
('darts', '0756', 'c2c7c33ba60d1052f95bcae72128fc47b1214cff', 'v0.0.485'),
('proxylessnas_cpu', '0750', '256da7c8a05cd87a59e30e314b22dc1d4565946e', 'v0.0.324'),
('proxylessnas_gpu', '0724', 'd9ce80964e37fb30bddcc552f1d68361b1a94873', 'v0.0.333'),
('proxylessnas_mobile', '0780', 'b8bb5a64f333562475dcfc09eeb7e603d6e66afb', 'v0.0.326'),
('proxylessnas_mobile14', '0651', 'f08baec85343104994b821581cde3ee965a2c593', 'v0.0.331'),
('fbnet_cb', '0761', '3db688f2fa465bc93bc546b4432389fe33aec5b3', 'v0.0.486'),
('xception', '0511', '9755eb77589d627d816fe9806a2b2aa931b6c2f6', 'v0.0.544'),
('inceptionv3', '0533', '4a05a7ffcb5963d44678933596ef85a0f724ebab', 'v0.0.552'),
('inceptionv4', '0488', 'a828ae6ccccb3178257d20e5f17cc8c6ac4348cc', 'v0.0.543'),
('inceptionresnetv1', '0481', '3cc3b4959349e56d6b29619b07ae020e19039f67', 'v0.0.552'),
('inceptionresnetv2', '0470', '4ea29355ae39ececb733e949dfdcbdff72272559', 'v0.0.547'),
('polynet', '0453', '742803144e5a2a6148212570726350da09adf3f6', 'v0.0.96'),
('nasnet_4a1056', '0790', 'f89dd74f47e42c35c9a1182f248df1d319524db7', 'v0.0.495'),
('nasnet_6a4032', '0424', '73cca5fee009db77412c5fca7c826b3563752757', 'v0.0.101'),
('pnasnet5large', '0428', '998a548f44ac1b1ac6c4959a721f2675ab5c48b9', 'v0.0.114'),
('spnasnet', '0776', '09cc881e024d69ab3bd88bef332becb6147a0651', 'v0.0.490'),
('efficientnet_b0', '0722', '041a8346ad6a13dbb66384d9a41ed20d959d3e77', 'v0.0.364'),
('efficientnet_b1', '0626', '455dcb2a05a1295c8f6b728d4f6c72c507e1369a', 'v0.0.376'),
('efficientnet_b0b', '0670', '8892ba581b0d81dbc8fe56c49d81d3dd007b1db8', 'v0.0.403'),
('efficientnet_b1b', '0565', 'c29a1b67804b70856ed3cc329256b80e3afc04ad', 'v0.0.403'),
('efficientnet_b2b', '0516', '7532826e5c14f7ade9e8ea9ac92044d817575b06', 'v0.0.403'),
('efficientnet_b3b', '0431', '1e342ec2160e6de813f9bd4d0ab9ce3b5749780e', 'v0.0.403'),
('efficientnet_b4b', '0376', 'b60e177974539fc6dcc9fef3b90591bfc9949514', 'v0.0.403'),
('efficientnet_b5b', '0334', 'cd70ae717ddca72430efe91cf7a3c4e28bcd61ac', 'v0.0.403'),
('efficientnet_b6b', '0312', 'f581d9f046032e28e532082fa49bfd373952db4f', 'v0.0.403'),
('efficientnet_b7b', '0311', '2b8a6040588aea44b57df89e2d9239d906737508', 'v0.0.403'),
('efficientnet_b0c', '0646', '81eabd2992ba7bb80c1c1a7e20373e7c65aa1286', 'v0.0.433'),
('efficientnet_b1c', '0555', '10b5589de6ee9af3c67f0ae35a424b4adc0a9e35', 'v0.0.433'),
('efficientnet_b2c', '0489', '6f649ece72d0334e5191da78767f2ac9149e85f0', 'v0.0.433'),
('efficientnet_b3c', '0434', 'e1e2a1b7f3457bdd8f46a5858472ad4acd3d2362', 'v0.0.433'),
('efficientnet_b4c', '0359', 'cdb2012d6688b0208527d36817589095a1db1031', 'v0.0.433'),
('efficientnet_b5c', '0302', '3240f368eb5e7ed78b9ac0d68800fbea9b220e9b', 'v0.0.433'),
('efficientnet_b6c', '0285', 'e71a1ccc3e876a7d64818c83518dd0e64c630d3e', 'v0.0.433'),
('efficientnet_b7c', '0277', 'feea7daf3478645131c94319c71141df559dce19', 'v0.0.433'),
('efficientnet_b8c', '0270', '050ec6358583d0a48c74b02563474ecc1d9dacba', 'v0.0.433'),
('efficientnet_edge_small_b', '0629', '5b398abc73c4c4870d88c62452ff919bec2440c9', 'v0.0.434'),
('efficientnet_edge_medium_b', '0553', '0b3c86d49b3684d19e6589030aeceb918faa648c', 'v0.0.434'),
('efficientnet_edge_large_b', '0477', '055436da0fa440933b906c528cb34b200e28f73c', 'v0.0.434'),
('mixnet_s', '0703', '135aa0426712a9f60afe2cfff2df9607f3fc2d68', 'v0.0.493'),
('mixnet_m', '0631', '0881aba9281e3cae3e7cddd75d385c3cccdf7e25', 'v0.0.493'),
('mixnet_l', '0557', '94cbf10cb34c3e3940117d86c28bcc6dbfe9e005', 'v0.0.500'),
('resneta10', '1159', 'a66e01d9f567ce747e7c255adac848a33c54a3a5', 'v0.0.484'),
('resnetabc14b', '0956', '6f8c36067feb2d27b5e0813bf18ef29038db0d70', 'v0.0.477'),
('resneta18', '0802', '225dd3ae0eac3ce3815732ac4ab0e25df709a75c', 'v0.0.486'),
('resneta50b', '0534', '28eff48a72d892802dde424db3fd0e1a9c12be16', 'v0.0.492'),
('resneta101b', '0442', 'cdf5f7ac2ad291cdc35b351e9220a490c8cc996c', 'v0.0.532'),
('resneta152b', '0424', 'deaaaabf413821eb543621826c368493b9041438', 'v0.0.524'),
('resnetd50b', '0549', '17d6004b5c6c1b97cfb47377ae5076810c5d88be', 'v0.0.296'),
('resnetd101b', '0461', 'fead1bcb86bba2be4ed7f0033fa972dc613e3280', 'v0.0.296'),
('resnetd152b', '0467', 'd0fe2fe09c6462de17aca4a72bbcb08b76a66e02', 'v0.0.296'),
('nin_cifar10', '0743', '9696dc1a8f67e7aa233836bcbdb99625769b1e86', 'v0.0.175'),
('nin_cifar100', '2839', 'eed0e9af2cd8e5aa77bb063204525812dbd9190f', 'v0.0.183'),
('nin_svhn', '0376', '7cb750180b0a981007194461bf57cfd90eb59c88', 'v0.0.270'),
('resnet20_cifar10', '0597', '13c5ab19145591d75873da3497be1dd1bd2afd46', 'v0.0.163'),
('resnet20_cifar100', '2964', '4e1443526ee96648bfe4d4954871a97a9c9622f4', 'v0.0.180'),
('resnet20_svhn', '0343', '7ac0d94a4563c9611092ce08f2124a3828103139', 'v0.0.265'),
('resnet56_cifar10', '0452', 'a73e63e9d0f3f7adde59b4142323c0dd05930de7', 'v0.0.163'),
('resnet56_cifar100', '2488', '590977100774a289b91088245dd2bd0cbe6567e6', 'v0.0.181'),
('resnet56_svhn', '0275', 'e676e4216a771b7d0339e87284c7ebb03af8ed25', 'v0.0.265'),
('resnet110_cifar10', '0369', 'f89f1c4d9fdd9e5cd00949a872211376979ff703', 'v0.0.163'),
('resnet110_cifar100', '2280', '6c5fa14bb4ced2dffe6ee1536306687aae57f9cb', 'v0.0.190'),
('resnet110_svhn', '0245', '0570b5942680cf88c66ae9a76c0e7ff0a41e71a6', 'v0.0.265'),
('resnet164bn_cifar10', '0368', 'e7941eeeddef9336664522eaa3af92d77128cac0', 'v0.0.179'),
('resnet164bn_cifar100', '2044', 'c7db7b5e6fbe6dc0f9501d25784f1a107c6e0315', 'v0.0.182'),
('resnet164bn_svhn', '0242', '8cdce67452d2780c7c69f4d0b979e80189d4bff8', 'v0.0.267'),
('resnet272bn_cifar10', '0333', '99dc36ca2abc91f3f82db181a14c5364cd5526be', 'v0.0.368'),
('resnet272bn_cifar100', '2007', '088af5c23634fe75206081d946fc82fdc9e999ad', 'v0.0.368'),
('resnet272bn_svhn', '0243', '39d741c8d081ebd2266a114e82363839ffdf8ebb', 'v0.0.368'),
('resnet542bn_cifar10', '0343', 'e687b254e1eace223ceef39ad17106e61b8649ba', 'v0.0.369'),
('resnet542bn_cifar100', '1932', 'df8bd5264c1db11dd545f62e9c750c7976edccc9', 'v0.0.369'),
('resnet542bn_svhn', '0234', '4f78075cbcba196fc8f5297b71730906c1bf7d8a', 'v0.0.369'),
('resnet1001_cifar10', '0328', 'bb979d53089138b5060b418cad6c8ad9a940bf81', 'v0.0.201'),
('resnet1001_cifar100', '1979', '692d9516620bc8b7a4da30a98ebcb7432243f5e9', 'v0.0.254'),
('resnet1001_svhn', '0241', '031fb0ce5e5ddbebca2fd7d856d63ddd147fe933', 'v0.0.408'),
('resnet1202_cifar10', '0353', '377510a63595e544333f6f57523222cd845744a8', 'v0.0.214'),
('resnet1202_cifar100', '2156', '1d94f9ccdd81e1785ea6ec02a861a4a05f39e5c9', 'v0.0.410'),
('preresnet20_cifar10', '0651', 'daa895737a34edda75c40f2d8566660590c84a3f', 'v0.0.164'),
('preresnet20_cifar100', '3022', '37f15365d48768f792f4551bd6ccf5259bc70530', 'v0.0.187'),
('preresnet20_svhn', '0322', '608cee12c0bc3cb59feea96386f6c12c6da91ba5', 'v0.0.269'),
('preresnet56_cifar10', '0449', 'cb37cb9d4524d4e0f5724aeed9face455f527efc', 'v0.0.164'),
('preresnet56_cifar100', '2505', '4c39e83f567f15d6ee0d69bf2dcaccd62067dfe5', 'v0.0.188'),
('preresnet56_svhn', '0280', 'b974c2c96a18ff2278f1d33df58c8537f9139ed9', 'v0.0.269'),
('preresnet110_cifar10', '0386', 'd6d4b7bd9f154eca242482a7559413d5c7b6d465', 'v0.0.164'),
('preresnet110_cifar100', '2267', '18cf4161c67c03e50cff7eb30988a559f3f97260', 'v0.0.191'),
('preresnet110_svhn', '0279', '6804450b744fa922d9ec22aa4c792d3a5da812f6', 'v0.0.269'),
('preresnet164bn_cifar10', '0364', '7ecf30cb818f80908ef4a77af4660c1080d0df81', 'v0.0.196'),
('preresnet164bn_cifar100', '2018', 'a20557c8968c04d8d07e40fdc5b0d1ec1fb3339d', 'v0.0.192'),
('preresnet164bn_svhn', '0258', '4aeee06affea89767c058fe1650b7476f05d8563', 'v0.0.269'),
('preresnet272bn_cifar10', '0325', '944ba29df55afcf2789399de552e91578edd4295', 'v0.0.389'),
('preresnet272bn_cifar100', '1963', '38e296beff0cd92697235c717a801ec422cdafe3', 'v0.0.389'),
('preresnet272bn_svhn', '0234', '7ff97873447fbfb1d823fd43439e34351f149c13', 'v0.0.389'),
('preresnet542bn_cifar10', '0314', 'ac40a67bb3b7f02179ff3c4fa0d6533ff3e2dd9f', 'v0.0.391'),
('preresnet542bn_cifar100', '1871', 'd536ad01fc40fe19605a3409efb995bf8593aa29', 'v0.0.391'),
('preresnet542bn_svhn', '0236', '3a4633f14e96cce30086ef4149b52c0b7cbccce6', 'v0.0.391'),
('preresnet1001_cifar10', '0265', '50507ff74b6047abe6d04af6471d9bacafa05e24', 'v0.0.209'),
('preresnet1001_cifar100', '1841', '185e033d77e61cec588196e3fe8bf8dcb43acfab', 'v0.0.283'),
('preresnet1202_cifar10', '0339', '942cf6f22d80b5428256825234a252b8d6ebbe9d', 'v0.0.246'),
('resnext20_1x64d_cifar10', '0433', '0661d12e534a87bd5f3305d541afce5730d45492', 'v0.0.365'),
('resnext20_1x64d_cifar100', '2197', 'e7073542469be79876e3b3aeccb767638a136b93', 'v0.0.365'),
('resnext20_1x64d_svhn', '0298', '3c7febc8eee0887ebc54d5e1a4411a46066f0624', 'v0.0.365'),
('resnext20_2x32d_cifar10', '0453', 'afb48ca4764efaff92e4cc50ec11566993180ac1', 'v0.0.365'),
('resnext20_2x32d_cifar100', '2255', '995281ee5b0d021bf6e9e984802e30a0042e0290', 'v0.0.365'),
('resnext20_2x32d_svhn', '0296', '54189677f599b4f42d2469ad75921165f84d82dc', 'v0.0.365'),
('resnext20_2x64d_cifar10', '0403', '6f0c138fe13a73c9f149053065f1f430761e75be', 'v0.0.365'),
('resnext20_2x64d_cifar100', '2060', '5f6dfa3ff5f1b0e3e1441f554a80c504c1991a03', 'v0.0.365'),
('resnext20_2x64d_svhn', '0283', '9c77f074dcd6333a859da6ddd833a662031406ea', 'v0.0.365'),
('resnext20_4x16d_cifar10', '0470', 'ae1ba8697ec0a62a58edb9c87ef0d9f290d3c857', 'v0.0.365'),
('resnext20_4x16d_cifar100', '2304', '2c9d578ab797ef887b3e8c51d897a8e720880bfd', 'v0.0.365'),
('resnext20_4x16d_svhn', '0317', '6691c8f56f7a63b5638408356a7f9fe4931d7ac9', 'v0.0.365'),
('resnext20_4x32d_cifar10', '0373', 'cf6960607fb51269cb80d6900eadefd16c6bb511', 'v0.0.365'),
('resnext20_4x32d_cifar100', '2131', '2c558efca7c66c412843603cff137e9565d7c016', 'v0.0.365'),
('resnext20_4x32d_svhn', '0298', '1da9a7bf6bed55f89b1c19a9b6ff97f7fa674c81', 'v0.0.365'),
('resnext20_8x8d_cifar10', '0466', '280e5f89e24ca765a771e733f53377ede244207f', 'v0.0.365'),
('resnext20_8x8d_cifar100', '2282', '363f03e8040da3a2f1cfe88df99481b23382e28c', 'v0.0.365'),
('resnext20_8x8d_svhn', '0318', 'c1536efbb2d7c78ee5b9323b43e3d0d56e9d2aa4', 'v0.0.365'),
('resnext20_8x16d_cifar10', '0404', '4d7f72818137674c3c17257427b0c5788b250320', 'v0.0.365'),
('resnext20_8x16d_cifar100', '2172', '3fc47c7072039fd5437ed276e92a0840c94fe82e', 'v0.0.365'),
('resnext20_8x16d_svhn', '0301', '41b28fd308e8195ee74050a2c56e2085552a55e0', 'v0.0.365'),
('resnext20_16x4d_cifar10', '0404', '426b5b2f994bc8154cb17957cf7d487b156d4fe2', 'v0.0.365'),
('resnext20_16x4d_cifar100', '2282', '508d32271ea1d34d40305a67d64cf1547ca58ae6', 'v0.0.365'),
('resnext20_16x4d_svhn', '0321', '854df3b71743fe871b5cd9b72ff6197b34c57b5e', 'v0.0.365'),
('resnext20_16x8d_cifar10', '0394', 'f81d05668bf293e5d84150ec54f94c4a50b79d9c', 'v0.0.365'),
('resnext20_16x8d_cifar100', '2173', 'a246aea51d1fd165c9f6db2aa528a2afa6a03dcd', 'v0.0.365'),
('resnext20_16x8d_svhn', '0293', '31f4b14e3113085a6b9a8e384a3131af764afe82', 'v0.0.365'),
('resnext20_32x2d_cifar10', '0461', '2d6ee8362c497b7782b91f85d5c9fd0f4ac0470e', 'v0.0.365'),
('resnext20_32x2d_cifar100', '2322', 'ce65201429a93cf9175c0e2b8601ceaf538a2438', 'v0.0.365'),
('resnext20_32x2d_svhn', '0327', '2499ff6de736f1241bc58422ae9d1d39d8ac1014', 'v0.0.365'),
('resnext20_32x4d_cifar10', '0420', 'a365893948daea8ef5b20b61bc3519fe199381af', 'v0.0.365'),
('resnext20_32x4d_cifar100', '2213', '5b2ffba877ea3dd0aa86d7c38ab5467d4ed856e6', 'v0.0.365'),
('resnext20_32x4d_svhn', '0309', 'ddbef9ac7da748e162ac886621833a5aef84cb86', 'v0.0.365'),
('resnext20_64x1d_cifar10', '0493', '6618e9ac6df3bc2614c23ba0e39ea0d0d5d21eec', 'v0.0.365'),
('resnext20_64x1d_cifar100', '2353', '9c789af45dd1455b4b7fe94a21709bb9dc8c411b', 'v0.0.365'),
('resnext20_64x1d_svhn', '0342', '2591ea440078f6c089513ace548ed79fb0f9bb76', 'v0.0.365'),
('resnext20_64x2d_cifar10', '0438', '32fe188b44b9e7f80ddec8024799be871dd63c4e', 'v0.0.365'),
('resnext20_64x2d_cifar100', '2235', '62fcc38a8e37550ace0f67927d2f5a74990de920', 'v0.0.365'),
('resnext20_64x2d_svhn', '0314', '4c01490b9566eeb2aaf27446ca8f5bac621894f0', 'v0.0.365'),
('resnext29_32x4d_cifar10', '0315', 'c8a1beda8ba616dc9af682d3ac172bfdd7a2472d', 'v0.0.169'),
('resnext29_32x4d_cifar100', '1950', '5f2eedcdd5cea6fdec1508f261f556a953ae28c2', 'v0.0.200'),
('resnext29_32x4d_svhn', '0280', 'dcb6aef96fbd76aa249e8f834093e2384b898404', 'v0.0.275'),
('resnext29_16x64d_cifar10', '0241', '76b97a4dd6185602a8ca8bdd77a70f8ddfcd4e83', 'v0.0.176'),
('resnext29_16x64d_cifar100', '1693', '1fcec90d6425e0405c61a1e90a80701ea556beca', 'v0.0.322'),
('resnext29_16x64d_svhn', '0268', 'c57307f3bf70d0f39d6cfb1dc2c82d8ef9e89603', 'v0.0.358'),
('resnext56_1x64d_cifar10', '0287', '8edd977c69cfec45604f1b0dbbe36fb2b65f122d', 'v0.0.367'),
('resnext56_1x64d_cifar100', '1825', 'b78642c10e21afc477c27076e2ab9ec6854cca13', 'v0.0.367'),
('resnext56_1x64d_svhn', '0242', '860c610caa23e742093d7c7677253ee5b917af44', 'v0.0.367'),
('resnext56_2x32d_cifar10', '0301', 'd0284dff7382d969c59e602f66b25a72b8d9e232', 'v0.0.367'),
('resnext56_2x32d_cifar100', '1786', '32205070280db6bd6431a6b7ad0537524af5a6f0', 'v0.0.367'),
('resnext56_2x32d_svhn', '0246', 'ffb8df9ba6a5053c79dbdecf7d0b6f1d248fcfdf', 'v0.0.367'),
('resnext56_4x16d_cifar10', '0311', 'add022e7dd21245720e789a0a16a7475d15b5702', 'v0.0.367'),
('resnext56_4x16d_cifar100', '1809', '366de7b5abba9b09d1ffde87e3e9f379db73d691', 'v0.0.367'),
('resnext56_4x16d_svhn', '0244', 'f7b697f924eeb64ed14d21f576a4a82007937bbe', 'v0.0.367'),
('resnext56_8x8d_cifar10', '0307', '4f0b72469f9890079471db355e9bfb405d443317', 'v0.0.367'),
('resnext56_8x8d_cifar100', '1806', '827a485e2d5f3154251d6453563a73a511028445', 'v0.0.367'),
('resnext56_8x8d_svhn', '0247', 'f0550cd045d9f4a4f8648f0c9c517421a34f4a11', 'v0.0.367'),
('resnext56_16x4d_cifar10', '0312', '93d71b610a356f8e285dde08ad4fe96d79efcadd', 'v0.0.367'),
('resnext56_16x4d_cifar100', '1824', '9cb7a1326870878315dccb50b0be51968d6ddfa7', 'v0.0.367'),
('resnext56_16x4d_svhn', '0256', '943386bd2e0fabba153955d80171016a95482609', 'v0.0.367'),
('resnext56_32x2d_cifar10', '0314', 'ea8b43351206a567f865a9a53ecae65790da8a60', 'v0.0.367'),
('resnext56_32x2d_cifar100', '1860', '3f65de935e532fe3c18324f645260983c61c0c76', 'v0.0.367'),
('resnext56_32x2d_svhn', '0253', 'ba8c809dcaa9f6f6e3b2e12dfd9df1b123d11be7', 'v0.0.367'),
('resnext56_64x1d_cifar10', '0341', '12a684ad58d1d3407d61b7de4b2cedc042225d20', 'v0.0.367'),
('resnext56_64x1d_cifar100', '1816', 'b80f4315d53963dd3d1ac856d3c2dbf660b836eb', 'v0.0.367'),
('resnext56_64x1d_svhn', '0255', '144bab62a5ed27bcb2465d9ab8b9b4ce0bf31dc5', 'v0.0.367'),
('resnext272_1x64d_cifar10', '0255', 'c1a3fddc4de9f6ebee2a588bd585ede68974da4f', 'v0.0.372'),
('resnext272_1x64d_cifar100', '1911', 'e0b3656a204c40e0b4ee6ade1e8d262c521e842d', 'v0.0.372'),
('resnext272_1x64d_svhn', '0235', '025ee7b915fa25d617e9774b0303f35488445d78', 'v0.0.372'),
('resnext272_2x32d_cifar10', '0274', '23b391ce58d9694ea35240ec3b76ce8b0ebf66b8', 'v0.0.375'),
('resnext272_2x32d_cifar100', '1834', '4802083b5fda38a4a30b8a2b1f24eb4b8fdf55ad', 'v0.0.375'),
('resnext272_2x32d_svhn', '0244', 'b65ddfe317dabceb4d4d7e910ca07c0c575ad9b8', 'v0.0.375'),
('seresnet20_cifar10', '0601', '3411e5ad1060975c45fe6d0d836755a92e3bb27c', 'v0.0.362'),
('seresnet20_cifar100', '2854', '184ad148171fd9244bf8570eee6647a996678ab4', 'v0.0.362'),
('seresnet20_svhn', '0323', 'a3a3c67731eb8bea0cd3af3b8b2f88c1cc70987e', 'v0.0.362'),
('seresnet56_cifar10', '0413', '21bac136e4cac21abb3e08b60254c73b16f0190f', 'v0.0.362'),
('seresnet56_cifar100', '2294', '989d4d9227c4fc33440c267f1e1ac324fd246ad4', 'v0.0.362'),
('seresnet56_svhn', '0264', '63a155acd6407b5e96516b86f6a7cd9e6855c372', 'v0.0.362'),
('seresnet110_cifar10', '0363', 'fa3f09a88d24282e938488c4588968e273770605', 'v0.0.362'),
('seresnet110_cifar100', '2086', '5345be4166268ce2cd44d88eb9edba9f86ccc864', 'v0.0.362'),
('seresnet110_svhn', '0235', 'd129498ad625983d92048f32b80de5d16987779a', 'v0.0.362'),
('seresnet164bn_cifar10', '0339', '11c923152587746a5539a9e4f140db847b9b61c1', 'v0.0.362'),
('seresnet164bn_cifar100', '1995', '6c9dc66b86de6be67df1e59d4aff4a592d7d98b8', 'v0.0.362'),
('seresnet164bn_svhn', '0245', 'd97ea6c83b0fd1da3a976d518977b19bd466d015', 'v0.0.362'),
('seresnet272bn_cifar10', '0339', 'da4073add21614f22b231d8663867007f3f2312d', 'v0.0.390'),
('seresnet272bn_cifar100', '1907', '754af9375f060f55f8c3393a70659df460b8d47d', 'v0.0.390'),
('seresnet272bn_svhn', '0238', '9ffe8acad2a03cd98f6746d3f1528fe5f294aea4', 'v0.0.390'),
('seresnet542bn_cifar10', '0347', 'e64d9ca4b98349973b802572af6625879bc3c4a4', 'v0.0.385'),
('seresnet542bn_cifar100', '1887', 'cd76c769c06b886d2a94268c95691dfc905bac64', 'v0.0.385'),
('seresnet542bn_svhn', '0226', '05ce3771c46aa8b7af7c2056398f83b2b9f116db', 'v0.0.385'),
('sepreresnet20_cifar10', '0618', 'e55551e6e35d04fe8e35d24a5e3d608a08e8dfa2', 'v0.0.379'),
('sepreresnet20_cifar100', '2831', 'ee5d3bd66ce643950e54d54825036b98b13b31cd', 'v0.0.379'),
('sepreresnet20_svhn', '0324', 'd5bb6768cc3134137371a832c5ebc289d982a8db', 'v0.0.379'),
('sepreresnet56_cifar10', '0451', '56c299345242bcfca52c83f77cfff80b7058b1fe', 'v0.0.379'),
('sepreresnet56_cifar100', '2305', '313a7a30a129c174ecaf3de6a94a5fd34dc8d711', 'v0.0.379'),
('sepreresnet56_svhn', '0271', 'f556af3db771e6e4e5b847bd4eddef71b879b8d0', 'v0.0.379'),
('sepreresnet110_cifar10', '0454', '67eea1cc03f76ee0054d39f004ab10f7a70978bb', 'v0.0.379'),
('sepreresnet110_cifar100', '2261', '3291a56be67afd1154d9d2d05e2e1411c12dcb4a', 'v0.0.379'),
('sepreresnet110_svhn', '0259', '5c09cacbcf786e18509947c40de695883d6b3328', 'v0.0.379'),
('sepreresnet164bn_cifar10', '0373', 'ac72ac7fa9d78e66a719717f922f738bbe7f9699', 'v0.0.379'),
('sepreresnet164bn_cifar100', '2005', 'd93993672a414e8f19cb12d2489be930e8605b8f', 'v0.0.379'),
('sepreresnet164bn_svhn', '0256', 'a45d1a65092900bb768969c52f26c61292838caa', 'v0.0.379'),
('sepreresnet272bn_cifar10', '0339', '3e47d575280a70c1726d6b2eb8d0d7c069a3e472', 'v0.0.379'),
('sepreresnet272bn_cifar100', '1913', 'd243b0580717b197bf9eb32a8959c83d21a3124f', 'v0.0.379'),
('sepreresnet272bn_svhn', '0249', '34b910cdfa34bf318f58c3312bc074059c7c669f', 'v0.0.379'),
('sepreresnet542bn_cifar10', '0308', '05f7d4a6bfb1af1825b734eac99b3b46dd8c4b91', 'v0.0.382'),
('sepreresnet542bn_cifar100', '1945', '4dd0e21d02fef2ae1fe3fb3a8cd8c72db11bb685', 'v0.0.382'),
('sepreresnet542bn_svhn', '0247', '456035daf4daecd909e957da090078deba6cb449', 'v0.0.382'),
('pyramidnet110_a48_cifar10', '0372', '35b94d0575c2081a142e71955c8ceea8c51ec5e5', 'v0.0.184'),
('pyramidnet110_a48_cifar100', '2095', '00fd42a00492b2bbb28cacfb7b1a6c63072c37a3', 'v0.0.186'),
('pyramidnet110_a48_svhn', '0247', 'd8a5c6e20b6cc01989a52f9e307caf640169ed0a', 'v0.0.281'),
('pyramidnet110_a84_cifar10', '0298', '81710d7ab90838a8a299bf5f50aed2a3fa41f0e3', 'v0.0.185'),
('pyramidnet110_a84_cifar100', '1887', '6712d5dc69452f2fde1fcc3ee32c3164dcaffc4e', 'v0.0.199'),
('pyramidnet110_a84_svhn', '0243', '473cc640c4ad0a1642500c84f2ef848498d12a37', 'v0.0.392'),
('pyramidnet110_a270_cifar10', '0251', '1e769ce50ef915a807ee99907912c87766fff60f', 'v0.0.194'),
('pyramidnet110_a270_cifar100', '1710', '2732fc6430085192189fd7ccfd287881cc5a6c0d', 'v0.0.319'),
('pyramidnet110_a270_svhn', '0238', '034be5421b598e84f395f421182f664495ca62ca', 'v0.0.393'),
('pyramidnet164_a270_bn_cifar10', '0242', 'c4a79ea3d84344b9d352074122e37f593ee98fd2', 'v0.0.264'),
('pyramidnet164_a270_bn_cifar100', '1670', '08f46c7ff99e9c3fd7b5262e34dc8a00b316646f', 'v0.0.312'),
('pyramidnet164_a270_bn_svhn', '0233', '27b67f1494ed508e0192e3e0f09ac86e32e1e734', 'v0.0.396'),
('pyramidnet200_a240_bn_cifar10', '0244', '52f4d43ec4d952f847c3a8e0503d5a4e6286679c', 'v0.0.268'),
('pyramidnet200_a240_bn_cifar100', '1609', 'e61e7e7eb6675aaf7a18461fea9bb3a53538d43b', 'v0.0.317'),
('pyramidnet200_a240_bn_svhn', '0232', '02bf262e70d9b3ce8038255d0abdec2bc5161f6d', 'v0.0.397'),
('pyramidnet236_a220_bn_cifar10', '0247', '1bd295a7fb834f639b238ffee818b3bde4126c81', 'v0.0.285'),
('pyramidnet236_a220_bn_cifar100', '1634', 'f066b3c6a4d217c42f5e8872fe23d343afe378ec', 'v0.0.312'),
('pyramidnet236_a220_bn_svhn', '0235', '1a0c0711f013035c0e05145501c93fa2519603ea', 'v0.0.398'),
('pyramidnet272_a200_bn_cifar10', '0239', 'd7b23c5460f059ac82ebc7b2cd992a203e098476', 'v0.0.284'),
('pyramidnet272_a200_bn_cifar100', '1619', '486e942734d91cd62d6bcbc283e1d7b56b734507', 'v0.0.312'),
('pyramidnet272_a200_bn_svhn', '0240', 'dcd9af34f57708f598bba723824bf3525f6e42c7', 'v0.0.404'),
('densenet40_k12_cifar10', '0561', '28dc0035549e51dcb53d1360707bd6f1558a5dcd', 'v0.0.193'),
('densenet40_k12_cifar100', '2490', '908f02ba7dbd7b8138f264193189e762a5590b1c', 'v0.0.195'),
('densenet40_k12_svhn', '0305', '645564c186a4e807293a68fb388803e36916e7b2', 'v0.0.278'),
('densenet40_k12_bc_cifar10', '0643', '7fdeda31c5accbddf47ab0f0b9a32cff723bf70d', 'v0.0.231'),
('densenet40_k12_bc_cifar100', '2841', '35cd8e6a2ae0896a8af2b689e076057fa19efa9b', 'v0.0.232'),
('densenet40_k12_bc_svhn', '0320', '6f2f98243fac9da22be26681bcd0a4d08e0f4baf', 'v0.0.279'),
('densenet40_k24_bc_cifar10', '0452', '13fa807e095b44ecaf3882e488b33a890d9d1e29', 'v0.0.220'),
('densenet40_k24_bc_cifar100', '2267', '2c4ef7c4bbe7f64784ad18b3845f4bf533f2ce57', 'v0.0.221'),
('densenet40_k24_bc_svhn', '0290', '03e136dd71bc85966fd2a4cb15692cfff3886df2', 'v0.0.280'),
('densenet40_k36_bc_cifar10', '0404', '4c154567e25619994a2f86371afbf1ad1e7475e9', 'v0.0.224'),
('densenet40_k36_bc_cifar100', '2050', 'd7275d39bcf439151c3bbeb707efa54943714b03', 'v0.0.225'),
('densenet40_k36_bc_svhn', '0260', 'b81ec8d662937851beecc62f36209fd8db464265', 'v0.0.311'),
('densenet100_k12_cifar10', '0366', '4e371ccb315d0fcd727a76255ca62ae9e92059cc', 'v0.0.205'),
('densenet100_k12_cifar100', '1964', '2ed5ec27a4d4a63876a4cacf52be53c91fbecb5f', 'v0.0.206'),
('densenet100_k12_svhn', '0260', '3e2b34b2087fe507a3672bfce1520747fca58046', 'v0.0.311'),
('densenet100_k24_cifar10', '0313', '9f795bac946d1390cf59f686b730fe512c406bd2', 'v0.0.252'),
('densenet100_k24_cifar100', '1808', '9bfa3e9c736a80906d163380cb361b940c2188bf', 'v0.0.318'),
('densenet100_k12_bc_cifar10', '0416', '6685d1f4844b092471f7d03dfc3fa64a302008e6', 'v0.0.189'),
('densenet100_k12_bc_cifar100', '2119', 'fbd8a54c1c9e4614f950b8473f8524d25caba4a7', 'v0.0.208'),
('densenet190_k40_bc_cifar10', '0252', '87b15be0620c0adff249d33540c20314188b16d7', 'v0.0.286'),
('densenet250_k24_bc_cifar10', '0267', 'dad68693d83a276d14a87dce6cebc5aceebca775', 'v0.0.290'),
('densenet250_k24_bc_cifar100', '1739', '598e91b7906f427296ab72cf40032f0846a52d91', 'v0.0.303'),
('xdensenet40_2_k24_bc_cifar10', '0531', '66c9d384d3ef4ec4095c9759bb8b7986f2f58e26', 'v0.0.226'),
('xdensenet40_2_k24_bc_cifar100', '2396', '73d5ba88a39b971457b9cea2cd72d1e05ab4d165', 'v0.0.227'),
('xdensenet40_2_k24_bc_svhn', '0287', '745f374b398bce378903af8c71cb3c67f6891d7f', 'v0.0.306'),
('xdensenet40_2_k36_bc_cifar10', '0437', 'e9bf419295f833b56fa3da27218107ed42310307', 'v0.0.233'),
('xdensenet40_2_k36_bc_cifar100', '2165', '78b6e754d90774d7b6ec3d811e6e57192148cfbf', 'v0.0.234'),
('xdensenet40_2_k36_bc_svhn', '0274', '4377e8918c1e008201aafc448f642642474eab14', 'v0.0.306'),
('wrn16_10_cifar10', '0293', 'ecf1c17c0814763095df562cb27d15a5aeb51836', 'v0.0.166'),
('wrn16_10_cifar100', '1895', 'bcb5c89ca71ffc99bc09b861b339724047724659', 'v0.0.204'),
('wrn16_10_svhn', '0278', '76f4e1361f9eca82fa4c2764b530f57280a34cfe', 'v0.0.271'),
('wrn28_10_cifar10', '0239', '16f3c8a249993f23b0f81d9ce3650faef5e455d8', 'v0.0.166'),
('wrn28_10_cifar100', '1788', '67ec43c6e913d43c8936809f04b0780035a24835', 'v0.0.320'),
('wrn28_10_svhn', '0271', 'fcd7a6b03a552b22ec25ee9a3833dc260976a757', 'v0.0.276'),
('wrn40_8_cifar10', '0237', '3b81d261706b751f5b731149b05fa92f500218e8', 'v0.0.166'),
('wrn40_8_cifar100', '1803', '114f6be2d5f8d561a5e3b4106fac30028defe300', 'v0.0.321'),
('wrn40_8_svhn', '0254', 'be7a21da6bc958c79725d7a29502c6a781cc67d9', 'v0.0.277'),
('wrn20_10_1bit_cifar10', '0326', 'c1a8ba4f1e1336a289c4b2eec75e25445b511ca6', 'v0.0.302'),
('wrn20_10_1bit_cifar100', '1904', 'adae01d6bec92d4fe388cddbb7f7eb598b1655d1', 'v0.0.302'),
('wrn20_10_1bit_svhn', '0273', 'ce9f819cf117fa66af112d9cbb0b65568623118d', 'v0.0.302'),
('wrn20_10_32bit_cifar10', '0314', '355496184493a55323c99bad9f79b0803548d373', 'v0.0.302'),
('wrn20_10_32bit_cifar100', '1812', 'd064f38aeaa14e9a2f4e9893ef6cca65615c53f9', 'v0.0.302'),
('wrn20_10_32bit_svhn', '0259', 'd9e8b46e180a34c0a765e22d24741f3849fca13a', 'v0.0.302'),
('ror3_56_cifar10', '0543', 'ee31a69a0503b41878c49d8925ac8e7ee813293b', 'v0.0.228'),
('ror3_56_cifar100', '2549', '4334559313cd9291af3d6ec0df144b21e695228b', 'v0.0.229'),
('ror3_56_svhn', '0269', '56617cf90e0902e88686af14939605c45d1170cf', 'v0.0.287'),
('ror3_110_cifar10', '0435', '0359916596cba01dfa481f105094c1047f592980', 'v0.0.235'),
('ror3_110_cifar100', '2364', 'b8c4d317241f54990180443d7fd9702d79c57ccc', 'v0.0.236'),
('ror3_110_svhn', '0257', '0677b7dfee32659a92719a5a16a7f387a5635f0b', 'v0.0.287'),
('ror3_164_cifar10', '0393', 'cc11aa06d928d0805279baccbf2b82371c31f503', 'v0.0.294'),
('ror3_164_cifar100', '2234', 'eb6a7fb8128240d84843a8e39adb00f606b6e2cf', 'v0.0.294'),
('ror3_164_svhn', '0273', 'b008c1b01386aca1803a1286607c5e1f843fc919', 'v0.0.294'),
('rir_cifar10', '0328', '5bed6f3506055b3ab5c4780a540cfebe014490ec', 'v0.0.292'),
('rir_cifar100', '1923', 'c42563834a971e18eacfc2287585aa2efa8af3eb', 'v0.0.292'),
('rir_svhn', '0268', '1c0718deaef5836efca4d5ded6140f0cd51424ab', 'v0.0.292'),
('shakeshakeresnet20_2x16d_cifar10', '0515', 'a7b8a2f77457e151da5d5ad3b9a2473594fecfc0', 'v0.0.215'),
('shakeshakeresnet20_2x16d_cifar100', '2922', 'e46e31a7d8308b57d9c0687000c40f15623998c2', 'v0.0.247'),
('shakeshakeresnet20_2x16d_svhn', '0317', '7a48fde5e1ccd5ff695892adf7094c15368ec778', 'v0.0.295'),
('shakeshakeresnet26_2x32d_cifar10', '0317', '21e60e626765001aaaf4eb26f7cb8f4a69ea3dc1', 'v0.0.217'),
('shakeshakeresnet26_2x32d_cifar100', '1880', 'bd46a7418374e3b3c844b33e12b09b6a98eb4e6e', 'v0.0.222'),
('shakeshakeresnet26_2x32d_svhn', '0262', 'f1dbb8ef162d9ec56478e2579272f85ed78ad896', 'v0.0.295'),
('diaresnet20_cifar10', '0622', '3e47641d76c1992652d8f973294f4763ecef1987', 'v0.0.340'),
('diaresnet20_cifar100', '2771', '3a58490ea95538ad5809c05739b4362088ea6961', 'v0.0.342'),
('diaresnet20_svhn', '0323', '579535ddc8b7c9becfe9bf97393ab33d9d5e7d0b', 'v0.0.342'),
('diaresnet56_cifar10', '0505', '45df69745c9692168697a7b980ade080ef7af07d', 'v0.0.340'),
('diaresnet56_cifar100', '2435', 'e45b7f281bb63c90104ff79d1519b4785a975a92', 'v0.0.342'),
('diaresnet56_svhn', '0268', '8f2c0574380bf14b0e9711d6370b2898f337cab0', 'v0.0.342'),
('diaresnet110_cifar10', '0410', '56f547ec833f419ea216f51439de50287dfef3c3', 'v0.0.340'),
('diaresnet110_cifar100', '2211', 'e99fad4ef0b2e7f09376beb314d672db7c3b6a55', 'v0.0.342'),
('diaresnet110_svhn', '0247', 'c587ac09f45fd7a29adfc1da62ad50174fd248ec', 'v0.0.342'),
('diaresnet164bn_cifar10', '0350', '533e7c6a30fce31c4f65686782cf761e7913750c', 'v0.0.340'),
('diaresnet164bn_cifar100', '1953', '43fa3821ab72e94187c12f7f950a2343649b3657', 'v0.0.342'),
('diaresnet164bn_svhn', '0244', 'eba062dce4033fd85ff78c2530b363b3768c036e', 'v0.0.342'),
('diapreresnet20_cifar10', '0642', 'ec36098cfbbb889fdd124083e785d0e21ba34792', 'v0.0.343'),
('diapreresnet20_cifar100', '2837', '32f0f1be9aa1da73f8fdb74f27ebaa49e7f9ace6', 'v0.0.343'),
('diapreresnet20_svhn', '0303', 'e33be387b0e71a4b0597558157ffbdb79c6db30c', 'v0.0.343'),
('diapreresnet56_cifar10', '0483', 'cba6950f21643a70b8e61b7197ca9cee9b2d0545', 'v0.0.343'),
('diapreresnet56_cifar100', '2505', 'c9f8bd4380d35e3806e1697c1c8d80bf7341c04e', 'v0.0.343'),
('diapreresnet56_svhn', '0280', '98a2a0bab42ff2605bd2cd4e63280b3631b042cb', 'v0.0.343'),
('diapreresnet110_cifar10', '0425', 'f4eae5abe2edebb1e224f7cf092ba02a873eb781', 'v0.0.343'),
('diapreresnet110_cifar100', '2269', '78d79bab215a5dc7221859d0b2688d040a55afb2', 'v0.0.343'),
('diapreresnet110_svhn', '0242', 'decb3765e92f5620580fe6b440ee2a82811d412e', 'v0.0.343'),
('diapreresnet164bn_cifar10', '0356', '9cf07392dc9714324e470fd50efb92ef286296ac', 'v0.0.343'),
('diapreresnet164bn_cifar100', '1999', '1625154f3cce7e131f25d8ee0b315b3fcc6fb760', 'v0.0.343'),
('diapreresnet164bn_svhn', '0256', '8476c5c9176abf21ea380dd00074b0ec30bbc530', 'v0.0.343'),
('resnet10_cub', '2765', '9dab9a498c380e6b7447827e00996d7cc61cc414', 'v0.0.335'),
('resnet12_cub', '2658', 'a46b8ec2d8dcd66a628dcfcb617acb15ef786b95', 'v0.0.336'),
('resnet14_cub', '2435', '0b9801b2e3aa3908bbc98f50d3ae3e986652742b', 'v0.0.337'),
('resnet16_cub', '2321', '031374ada9830869372a63e132c2477a04425444', 'v0.0.338'),
('resnet18_cub', '2330', 'e72712003928ed70ccf44b953e9cec4f78a75eea', 'v0.0.344'),
('resnet26_cub', '2252', '61cce1ea575f650a7e12a08b1a09335afa6cb605', 'v0.0.345'),
('seresnet10_cub', '2739', '7060c03f78bc60df09288b433eb6117c0e167210', 'v0.0.361'),
('seresnet12_cub', '2604', 'ee095118bde6e05ea102f5b945401a7221b7b7fb', 'v0.0.361'),
('seresnet14_cub', '2363', '5d2049d53c0445d7c66c849a5cd805ce39a37ddb', 'v0.0.361'),
('seresnet16_cub', '2321', '576e58eff57730a516094b8ba79452092187b693', 'v0.0.361'),
('seresnet18_cub', '2308', '3d2496d66efd6a00ca516c1a7a5a091f90043237', 'v0.0.361'),
('seresnet26_cub', '2251', '8d54edb2800b2ff071ee5beedde285eb9553bc22', 'v0.0.361'),
('mobilenet_w1_cub', '2346', 'efcad3dcf1975552f15028255a15f86a16b60987', 'v0.0.346'),
('proxylessnas_mobile_cub', '2188', '36d33231029b466638b3b1f8b2d1392e22d1afa7', 'v0.0.347'),
('ntsnet_cub', '1326', '75ae8cdcf4beb1ab60c1a983c9f143baaebbdea0', 'v0.0.334'),
('pspnet_resnetd101b_voc', '8144', 'e15319bf5428637e7fc00dcd426dd458ac937b08', 'v0.0.297'),
('pspnet_resnetd50b_ade20k', '3687', 'f0dcdf734f8f32a879dec3c4e7fe61d629244030', 'v0.0.297'),
('pspnet_resnetd101b_ade20k', '3797', 'c1280aeab8daa31c0893f7551d70130c2b68214a', 'v0.0.297'),
('pspnet_resnetd101b_cityscapes', '7172', 'd5ad2fa4c4208f439ab0b98267babe0c4d9e6e94', 'v0.0.297'),
('pspnet_resnetd101b_coco', '6741', '87582b79c48c4e995de808ff0cbc162c55b52031', 'v0.0.297'),
('deeplabv3_resnetd101b_voc', '8024', '8ee3099c5c983ef1cc0ce23b23d91db40b2986b8', 'v0.0.298'),
('deeplabv3_resnetd152b_voc', '8120', '88fb315dc3c58a84f325e63105fbfe322932073f', 'v0.0.298'),
('deeplabv3_resnetd50b_ade20k', '3713', '5d5e2f74008ab3637a05b6b1357c9c339296188c', 'v0.0.298'),
('deeplabv3_resnetd101b_ade20k', '3784', '6224836f8f31a00be1718a530a20670136bb3958', 'v0.0.298'),
('deeplabv3_resnetd101b_coco', '6773', '74dc9914078e47feb3ff64fba717d1d4040d8235', 'v0.0.298'),
('deeplabv3_resnetd152b_coco', '6899', 'edd79b4ca095f1674e7a68ee0dc8ed8bcd0b6a26', 'v0.0.298'),
('fcn8sd_resnetd101b_voc', '8040', 'f6c67c75bce4f9a3e17bf555369c0c9332ab5c1f', 'v0.0.299'),
('fcn8sd_resnetd50b_ade20k', '3339', '9856c5ee8186d1ac4b0eb5177c73e76c4cd63bb0', 'v0.0.299'),
('fcn8sd_resnetd101b_ade20k', '3588', '081774b2fb373d7b759cda2160fa0d2599b1c5f1', 'v0.0.299'),
('fcn8sd_resnetd101b_coco', '6011', '05e97cc5f5fcdf1c5ec5c617062d43adfe150d88', 'v0.0.299'),
('icnet_resnetd50b_cityscapes', '6402', '6c8f86a53526ae107e58d5f645bc4de0da9c1bb1', 'v0.0.457'),
('fastscnn_cityscapes', '6576', '9e0d75e56bde8d1643d3ff0053e55114c0a77ee9', 'v0.0.474'),
('sinet_cityscapes', '6031', '47d8ae7824bd297bbf25c2f33e3d4a86a503aefa', 'v0.0.437'),
('bisenet_resnet18_celebamaskhq', '0000', 'd72f0cf3101625bb4265e4cf5ae557b994f84d67', 'v0.0.462'),
('danet_resnetd50b_cityscapes', '6799', '9880a0eb9523ba2c1f98025f7d7115a2a4c1f376', 'v0.0.468'),
('danet_resnetd101b_cityscapes', '6810', 'ea69dcea31f5b250254a226f70df130875ff18b2', 'v0.0.468'),
('alphapose_fastseresnet101b_coco', '7415', '70082a53d3cdeb7ebe4c7d8c16a6a39830f1ed23', 'v0.0.454'),
('simplepose_resnet18_coco', '6631', '5a6198e5103a28faab4e49c687121634c7f7d196', 'v0.0.455'),
('simplepose_resnet50b_coco', '7102', '6315ffa72993eea3f573ae1cc23f84d0275f0fbe', 'v0.0.455'),
('simplepose_resnet101b_coco', '7244', '0491ab951827782492ffbd8fa57aa6dd599c7e9f', 'v0.0.455'),
('simplepose_resnet152b_coco', '7253', '4590c1c555eb54c4e9afdc83fa8a199132afd212', 'v0.0.455'),
('simplepose_resneta50b_coco', '7170', 'fa09a84ee2e085ad6b641c4fe0cc483651861789', 'v0.0.455'),
('simplepose_resneta101b_coco', '7297', '7ddd6cb20bcd626e05e8a627601bf19b704ba6a9', 'v0.0.455'),
('simplepose_resneta152b_coco', '7344', '9ec1a3dc2a23a19cb7f5ac5466cebba8069a0f93', 'v0.0.455'),
('simplepose_mobile_resnet18_coco', '6625', '8ff93eed70ac73503c8c1e346ddd1ade5d9e3edf', 'v0.0.456'),
('simplepose_mobile_resnet50b_coco', '7110', 'e0f2e587ffdf5e074a29f877890b13b99a58c6c2', 'v0.0.456'),
('simplepose_mobile_mobilenet_w1_coco', '6410', '0867e5aa76d5ec37cde08c71de8324a9c2913922', 'v0.0.456'),
('simplepose_mobile_mobilenetv2b_w1_coco', '6374', '07e9c6295a8aa2b7bb9a1d0b7c716141e7ee71dc', 'v0.0.456'),
('simplepose_mobile_mobilenetv3_small_w1_coco', '5434', 'cb837c0e32edec68dc8598d71599ea7404936f96', 'v0.0.456'),
('simplepose_mobile_mobilenetv3_large_w1_coco', '6367', '7ba036a5ade736042531a0fe500ecec368dbf157', 'v0.0.456'),
('lwopenpose2d_mobilenet_cmupan_coco', '3999', 'b4a22e7c2a05e53fe22185002c48e81f76c2d918', 'v0.0.458'),
('lwopenpose3d_mobilenet_cmupan_coco', '3999', '4658738ec27d46ee01f2cad4aa975914e9f7108c', 'v0.0.458'),
('ibppose_coco', '6486', '024d1fafb7471572129ccbb07d662f6d8ccdc758', 'v0.0.459'),
('jasperdr10x5_en', '2190', '1ce0ab1cd891294988d7f34d2153fabd968aa876', 'v0.0.555'),
('jasperdr10x5_en_nr', '1789', '49b0b7718e69265c288c163fa6bbb68eb4db79e4', 'v0.0.555'),
('quartznet5x5_en_ls', '4468', 'b37c8cb2e84b2573ed4364d3786e5d06f36cba92', 'v0.0.555'),
('quartznet15x5_en', '1677', 'bf63d3ffbd117d7e6c36bcd7725c998b8ab06754', 'v0.0.555'),
('quartznet15x5_en_nr', '1774', '5a70b9ec078945392c9e5a4161a36f431b1966ab', 'v0.0.555'),
('quartznet15x5_de', '1166', '5e254c91fe57bb2ed5bae62eb5067fc4922f4184', 'v0.0.555'),
('quartznet15x5_fr', '1388', 'f2236953caa996832118c7a412c839395237b5c2', 'v0.0.555'),
('quartznet15x5_it', '1502', '2df788c3c06415237e254ea91ab2994b35c32e71', 'v0.0.555'),
('quartznet15x5_es', '1295', '117352a802814689275b03a161c537ae91e68a9f', 'v0.0.555'),
('quartznet15x5_ca', '0842', 'da40489e324b6a790638d8ed164be3728f4caf3c', 'v0.0.555'),
('quartznet15x5_pl', '1359', '0df08d125314b5a728cd22b77bb8c6dc2b9c2cd1', 'v0.0.555'),
('quartznet15x5_ru', '1648', 'aecf49e137f8041cebc5fcfe8d1834420f03d98b', 'v0.0.555'),
('quartznet15x5_ru34', '0968', 'ff446c0cca42687330ade49dfb6bac05d7251803', 'v0.0.555'),
]}
# Base URL of the GitHub repository whose releases host the pretrained weight archives.
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
    """
    Look up the pretrained-model record for a given model name.

    Parameters:
    ----------
    model_name : str
        Name of the model.

    Returns:
    -------
    tuple of (str, str, str)
        Error value, SHA1 hash of the parameter file, and repository release tag.

    Raises:
    ------
    ValueError
        If no pretrained weights are registered for `model_name`.
    """
    if model_name in _model_sha1:
        return _model_sha1[model_name]
    raise ValueError("Pretrained model for {name} is not available.".format(name=model_name))
def get_model_file(model_name,
                   local_model_store_dir_path=os.path.join("~", ".mxnet", "models")):
    """
    Return the local path of a pretrained parameter file, downloading it from the online model
    zoo when it is missing or its checksum does not match. The root directory is created if it
    does not already exist.

    Parameters:
    ----------
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $MXNET_HOME/models
        Location for keeping the model parameters.

    Returns:
    -------
    file_path
        Path to the requested pretrained model file.
    """
    error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
    file_name = "{name}-{error}-{short_sha1}.params".format(
        name=model_name,
        error=error,
        short_sha1=sha1_hash[:8])
    root_dir_path = os.path.expanduser(local_model_store_dir_path)
    file_path = os.path.join(root_dir_path, file_name)

    if os.path.exists(file_path):
        if check_sha1(file_path, sha1_hash):
            # Cached copy present and verified — nothing to download.
            return file_path
        logging.warning("Mismatch in the content of model file detected. Downloading again.")
    else:
        logging.info("Model file not found. Downloading to {}.".format(file_path))

    if not os.path.exists(root_dir_path):
        os.makedirs(root_dir_path)

    # The zoo stores each parameter file zipped; fetch the archive next to the target file,
    # unpack it in place, then discard the archive.
    zip_file_path = file_path + ".zip"
    download(
        url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format(
            repo_url=imgclsmob_repo_url,
            repo_release_tag=repo_release_tag,
            file_name=file_name),
        path=zip_file_path,
        overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(root_dir_path)
    os.remove(zip_file_path)

    if check_sha1(file_path, sha1_hash):
        return file_path
    raise ValueError("Downloaded file has different hash. Please try again.")
| 61,759 | 83.718793 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fastseresnet.py | """
Fast-SE-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['FastSEResNet', 'fastseresnet101b']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, SEBlock
from .resnet import ResBlock, ResBottleneck, ResInitBlock
class FastSEResUnit(HybridBlock):
    """
    Fast-SE-ResNet unit: a standard residual unit optionally followed by a
    Squeeze-and-Excitation (SE) channel-reweighting block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    use_se : bool
        Whether to use SE-module.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 conv1_stride,
                 use_se,
                 **kwargs):
        super(FastSEResUnit, self).__init__(**kwargs)
        self.use_se = use_se
        # The identity branch needs a 1x1 projection whenever the residual body
        # changes the channel count or downsamples spatially.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            if bottleneck:
                self.body = ResBottleneck(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    conv1_stride=conv1_stride)
            else:
                self.body = ResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
            if self.use_se:
                # NOTE(review): reduction=1 means the SE excitation path does not
                # compress channels (unlike the usual reduction=16) — presumably
                # deliberate for this "fast" variant; confirm against the paper.
                self.se = SEBlock(
                    channels=out_channels,
                    reduction=1,
                    use_conv=False)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        """Run the residual body (with optional SE reweighting), add the identity, ReLU."""
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        if self.use_se:
            x = self.se(x)
        x = x + identity
        x = self.activ(x)
        return x
class FastSEResNet(HybridBlock):
    """
    Fast-SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(FastSEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the start of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # SE attention is applied only in the first unit of each
                        # stage — presumably the "fast" simplification; confirm
                        # against the reference implementation.
                        use_se = (j == 0)
                        stage.add(FastSEResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride,
                            use_se=use_se))
                        in_channels = out_channels
                self.features.add(stage)
            # Fixed 7x7 average pool assumes a 7x7 final feature map, i.e. a
            # 224x224 input (matches the in_size default).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Extract convolutional features, then apply the flatten + dense classifier head."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_fastseresnet(blocks,
                     bottleneck=None,
                     conv1_stride=True,
                     model_name=None,
                     pretrained=False,
                     ctx=cpu(),
                     root=os.path.join("~", ".mxnet", "models"),
                     **kwargs):
    """
    Create Fast-SE-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units (defaults to True for depth >= 50).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if bottleneck is None:
        # Deeper configurations conventionally use bottleneck units.
        bottleneck = (blocks >= 50)
    # Per-stage unit counts for depths that do not depend on the unit type.
    common_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    # Depths whose unit counts depend on whether bottleneck units are used
    # (a bottleneck unit contributes 3 layers, a simple unit 2).
    special_layers = {
        (14, False): [2, 2, 1, 1],
        (14, True): [1, 1, 1, 1],
        (26, False): [3, 3, 3, 3],
        (26, True): [2, 2, 2, 2],
        (38, True): [3, 3, 3, 3],
    }
    layers = special_layers.get((blocks, bottleneck), common_layers.get(blocks))
    if layers is None:
        raise ValueError("Unsupported Fast-SE-ResNet with number of blocks: {}".format(blocks))
    # Sanity-check that the chosen configuration reproduces the requested depth.
    assert (sum(layers) * (3 if bottleneck else 2) + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        # Bottleneck units expand the output width by a factor of 4.
        channels_per_layers = [ci * 4 for ci in channels_per_layers]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = FastSEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def fastseresnet101b(**kwargs):
    """
    Fast-SE-ResNet-101 model (stride placed at the second convolution of each bottleneck block) from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_fastseresnet(
        blocks=101,
        conv1_stride=False,
        model_name="fastseresnet101b",
        **kwargs)
    return net
def _test():
    # Smoke test: build each model, count trainable weights and run one dummy batch.
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        fastseresnet101b,
    ]
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != fastseresnet101b or weight_count == 55697960)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 10,500 | 32.336508 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/ibnbresnet.py | """
IBN(b)-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
https://arxiv.org/abs/1807.09441.
"""
__all__ = ['IBNbResNet', 'ibnb_resnet50', 'ibnb_resnet101', 'ibnb_resnet152']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block
from .resnet import ResBottleneck
class IBNbConvBlock(HybridBlock):
    """
    IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 activate=True,
                 **kwargs):
        super(IBNbConvBlock, self).__init__(**kwargs)
        self.activate = activate
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                dilation=dilation,
                groups=groups,
                use_bias=use_bias,
                in_channels=in_channels)
            # IBN(b) uses instance normalization (instead of batch norm) right after the convolution.
            self.inst_norm = nn.InstanceNorm(
                in_channels=out_channels,
                scale=True)
            if self.activate:
                self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        # conv -> instance norm -> (optional) ReLU.
        x = self.conv(x)
        x = self.inst_norm(x)
        if self.activate:
            x = self.activ(x)
        return x
def ibnb_conv7x7_block(in_channels,
                       out_channels,
                       strides=1,
                       padding=3,
                       use_bias=False,
                       activate=True):
    """
    7x7 variant of the IBN(b)-ResNet specific convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activate : bool, default True
        Whether activate the convolution block.
    """
    block = IBNbConvBlock(
        kernel_size=7,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        activate=activate)
    return block
class IBNbResUnit(HybridBlock):
    """
    IBN(b)-ResNet unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_inst_norm : bool
        Whether to use instance normalization.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_inst_norm,
                 bn_use_global_stats,
                 **kwargs):
        super(IBNbResUnit, self).__init__(**kwargs)
        self.use_inst_norm = use_inst_norm
        # A 1x1 projection is needed on the identity path when channels or resolution change.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = ResBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                conv1_stride=False)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            if self.use_inst_norm:
                self.inst_norm = nn.InstanceNorm(
                    in_channels=out_channels,
                    scale=True)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        # IBN(b): optional instance normalization is applied after the residual addition,
        # before the final ReLU.
        if self.use_inst_norm:
            x = self.inst_norm(x)
        x = self.activ(x)
        return x
class IBNbResInitBlock(HybridBlock):
    """
    IBN(b)-ResNet specific initial block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(IBNbResInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Strided 7x7 conv (with instance norm) followed by a strided max pool:
            # overall 4x spatial reduction, as in the standard ResNet stem.
            self.conv = ibnb_conv7x7_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=1)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.pool(x)
        return x
class IBNbResNet(HybridBlock):
    """
    IBN(b)-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'
    https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(IBNbResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(IBNbResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # IBN(b): instance norm only in the last unit of the first two stages.
                        use_inst_norm = (i < 2) and (j == len(channels_per_stage) - 1)
                        stage.add(IBNbResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            use_inst_norm=use_inst_norm,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 7x7 average pooling (matches a 224x224 input producing a 7x7 feature map).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_ibnbresnet(blocks,
                   model_name=None,
                   pretrained=False,
                   ctx=cpu(),
                   root=os.path.join("~", ".mxnet", "models"),
                   **kwargs):
    """
    Create IBN(b)-ResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for the supported depths.
    blocks_to_layers = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported IBN(b)-ResNet with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = IBNbResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def ibnb_resnet50(**kwargs):
    """
    Construct the IBN(b)-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via
    IBN-Net,' https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_ibnbresnet(blocks=50, model_name="ibnb_resnet50", **kwargs)
    return net
def ibnb_resnet101(**kwargs):
    """
    Construct the IBN(b)-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via
    IBN-Net,' https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_ibnbresnet(blocks=101, model_name="ibnb_resnet101", **kwargs)
    return net
def ibnb_resnet152(**kwargs):
    """
    Construct the IBN(b)-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via
    IBN-Net,' https://arxiv.org/abs/1807.09441.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_ibnbresnet(blocks=152, model_name="ibnb_resnet152", **kwargs)
    return net
def _test():
    # Smoke test: build each model, count trainable weights and run one dummy batch.
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        ibnb_resnet50,
        ibnb_resnet101,
        ibnb_resnet152,
    ]
    expected_counts = {
        ibnb_resnet50: 25558568,
        ibnb_resnet101: 44550696,
        ibnb_resnet152: 60194344,
    }
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 13,442 | 31.007143 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/polynet.py | """
PolyNet for ImageNet-1K, implemented in Gluon.
Original paper: 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
https://arxiv.org/abs/1611.05725.
"""
__all__ = ['PolyNet', 'polynet']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import ConvBlock, conv1x1_block, conv3x3_block, ParametricSequential, ParametricConcurrent
class PolyConv(HybridBlock):
    """
    PolyNet specific convolution block. A block that is used inside poly-N (poly-2, poly-3, and so on) modules.
    The Convolution layer is shared between all Inception blocks inside a poly-N module. BatchNorm layers are not
    shared between Inception blocks and therefore the number of BatchNorm layers is equal to the number of Inception
    blocks inside a poly-N module.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    num_blocks : int
        Number of blocks (BatchNorm layers).
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 bn_use_global_stats,
                 num_blocks,
                 **kwargs):
        super(PolyConv, self).__init__(**kwargs)
        with self.name_scope():
            # Single convolution shared by all blocks of the poly-N module.
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=False,
                in_channels=in_channels)
            # One BatchNorm per block, registered as attributes bn1..bn{num_blocks}.
            for i in range(num_blocks):
                setattr(self, "bn{}".format(i + 1), nn.BatchNorm(
                    in_channels=out_channels,
                    use_global_stats=bn_use_global_stats))
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x, index):
        x = self.conv(x)
        # Pick the BatchNorm belonging to the block selected by `index` (0-based).
        bn = getattr(self, "bn{}".format(index + 1))
        x = bn(x)
        x = self.activ(x)
        return x
def poly_conv1x1(in_channels,
                 out_channels,
                 bn_use_global_stats,
                 num_blocks):
    """
    1x1 variant of the PolyNet specific convolution block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    num_blocks : int
        Number of blocks (BatchNorm layers).
    """
    block = PolyConv(
        kernel_size=1,
        strides=1,
        padding=0,
        in_channels=in_channels,
        out_channels=out_channels,
        bn_use_global_stats=bn_use_global_stats,
        num_blocks=num_blocks)
    return block
class MaxPoolBranch(HybridBlock):
    """
    PolyNet specific max pooling branch block.
    Preserves the channel count; halves the spatial resolution (3x3 pool, stride 2, no padding).
    """
    def __init__(self,
                 **kwargs):
        super(MaxPoolBranch, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0)
    def hybrid_forward(self, F, x):
        x = self.pool(x)
        return x
class Conv1x1Branch(HybridBlock):
    """
    PolyNet specific convolutional 1x1 branch block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(Conv1x1Branch, self).__init__(**kwargs)
        with self.name_scope():
            # Single 1x1 conv + BN + activation (channel projection only).
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        return x
class Conv3x3Branch(HybridBlock):
    """
    PolyNet specific convolutional 3x3 branch block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(Conv3x3Branch, self).__init__(**kwargs)
        with self.name_scope():
            # Strided 3x3 conv without padding: halves the spatial resolution.
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                padding=0,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        return x
class ConvSeqBranch(HybridBlock):
    """
    PolyNet specific convolutional sequence branch block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_use_global_stats,
                 **kwargs):
        super(ConvSeqBranch, self).__init__(**kwargs)
        # All per-layer parameter lists must describe the same number of convolutions.
        assert (len(out_channels_list) == len(kernel_size_list))
        assert (len(out_channels_list) == len(strides_list))
        assert (len(out_channels_list) == len(padding_list))
        with self.name_scope():
            self.conv_list = nn.HybridSequential(prefix="")
            # Chain the ConvBlocks: each layer's output channels become the next layer's input.
            for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
                    out_channels_list, kernel_size_list, strides_list, padding_list)):
                self.conv_list.add(ConvBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels
    def hybrid_forward(self, F, x):
        x = self.conv_list(x)
        return x
class PolyConvSeqBranch(HybridBlock):
    """
    PolyNet specific convolutional sequence branch block with internal PolyNet specific convolution blocks.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of tuple of int
        List of numbers of output channels.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    num_blocks : int
        Number of blocks for PolyConv.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_use_global_stats,
                 num_blocks,
                 **kwargs):
        super(PolyConvSeqBranch, self).__init__(**kwargs)
        # All per-layer parameter lists must describe the same number of convolutions.
        assert (len(out_channels_list) == len(kernel_size_list))
        assert (len(out_channels_list) == len(strides_list))
        assert (len(out_channels_list) == len(padding_list))
        with self.name_scope():
            # ParametricSequential forwards the extra `index` argument to every PolyConv.
            self.conv_list = ParametricSequential(prefix="")
            for i, (out_channels, kernel_size, strides, padding) in enumerate(zip(
                    out_channels_list, kernel_size_list, strides_list, padding_list)):
                self.conv_list.add(PolyConv(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    bn_use_global_stats=bn_use_global_stats,
                    num_blocks=num_blocks))
                in_channels = out_channels
    def hybrid_forward(self, F, x, index):
        # `index` selects the per-block BatchNorm set inside each PolyConv.
        x = self.conv_list(x, index)
        return x
class TwoWayABlock(HybridBlock):
    """
    PolyNet type Inception-A block.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(TwoWayABlock, self).__init__(**kwargs)
        in_channels = 384
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(32, 48, 64),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 1),
                padding_list=(0, 1, 1),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(32, 32),
                kernel_size_list=(1, 3),
                strides_list=(1, 1),
                padding_list=(0, 1),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=32,
                bn_use_global_stats=bn_use_global_stats))
            # Concatenated branch output: 64 + 32 + 32 = 128 channels; the final 1x1 conv
            # projects back to 384 with no activation (linear residual-branch output).
            self.conv = conv1x1_block(
                in_channels=128,
                out_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        x = self.conv(x)
        return x
class TwoWayBBlock(HybridBlock):
    """
    PolyNet type Inception-B block.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(TwoWayBBlock, self).__init__(**kwargs)
        in_channels = 1152
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            # Factorized 7x7: a 1x7 followed by a 7x1 convolution.
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(128, 160, 192),
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats))
            # Concatenated branch output: 192 + 192 = 384 channels; project back to 1152
            # with no activation (linear residual-branch output).
            self.conv = conv1x1_block(
                in_channels=384,
                out_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        x = self.conv(x)
        return x
class TwoWayCBlock(HybridBlock):
    """
    PolyNet type Inception-C block.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(TwoWayCBlock, self).__init__(**kwargs)
        in_channels = 2048
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            # Factorized 3x3: a 1x3 followed by a 3x1 convolution.
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 224, 256),
                kernel_size_list=(1, (1, 3), (3, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 1), (1, 0)),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats))
            # Concatenated branch output: 256 + 192 = 448 channels; project back to 2048
            # with no activation (linear residual-branch output).
            self.conv = conv1x1_block(
                in_channels=448,
                out_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        x = self.conv(x)
        return x
class PolyPreBBlock(HybridBlock):
    """
    PolyNet type PolyResidual-Pre-B block.
    Operates on the fixed 1152-channel trunk; the concatenated branch output has
    192 + 192 = 384 channels.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    num_blocks : int
        Number of blocks (BatchNorm layers).
    """
    def __init__(self,
                 bn_use_global_stats,
                 num_blocks,
                 **kwargs):
        super(PolyPreBBlock, self).__init__(**kwargs)
        in_channels = 1152
        with self.name_scope():
            # ParametricConcurrent concatenates branch outputs along the channel axis and
            # forwards the extra `index` argument to every branch.
            self.branches = ParametricConcurrent(axis=1, prefix="")
            self.branches.add(PolyConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(128, 160, 192),
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_use_global_stats=bn_use_global_stats,
                num_blocks=num_blocks))
            self.branches.add(poly_conv1x1(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats,
                num_blocks=num_blocks))
    def hybrid_forward(self, F, x, index):
        x = self.branches(x, index)
        return x
class PolyPreCBlock(HybridBlock):
    """
    PolyNet type PolyResidual-Pre-C block.
    Operates on the fixed 2048-channel trunk; the concatenated branch output has
    256 + 192 = 448 channels.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    num_blocks : int
        Number of blocks (BatchNorm layers).
    """
    def __init__(self,
                 bn_use_global_stats,
                 num_blocks,
                 **kwargs):
        super(PolyPreCBlock, self).__init__(**kwargs)
        in_channels = 2048
        with self.name_scope():
            # ParametricConcurrent concatenates branch outputs along the channel axis and
            # forwards the extra `index` argument to every branch.
            self.branches = ParametricConcurrent(axis=1, prefix="")
            self.branches.add(PolyConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 224, 256),
                kernel_size_list=(1, (1, 3), (3, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 1), (1, 0)),
                bn_use_global_stats=bn_use_global_stats,
                num_blocks=num_blocks))
            self.branches.add(poly_conv1x1(
                in_channels=in_channels,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats,
                num_blocks=num_blocks))
    def hybrid_forward(self, F, x, index):
        x = self.branches(x, index)
        return x
def poly_res_b_block(bn_use_global_stats):
    """
    PolyNet type PolyResidual-Res-B block.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    # Linear 1x1 projection from the 384-channel pre-block output back to the 1152-channel trunk.
    block = conv1x1_block(
        in_channels=384,
        out_channels=1152,
        strides=1,
        activation=None,
        bn_use_global_stats=bn_use_global_stats)
    return block
def poly_res_c_block(bn_use_global_stats):
    """
    PolyNet type PolyResidual-Res-C block.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    # Linear 1x1 projection from the 448-channel pre-block output back to the 2048-channel trunk.
    block = conv1x1_block(
        in_channels=448,
        out_channels=2048,
        strides=1,
        activation=None,
        bn_use_global_stats=bn_use_global_stats)
    return block
class MultiResidual(HybridBlock):
    """
    Base class for constructing N-way modules (2-way, 3-way, and so on). Actually it is for 2-way modules.
    Parameters:
    ----------
    scale : float
        Scale value for each residual branch.
    res_block : HybridBlock class
        Residual branch block.
    num_blocks : int
        Number of residual branches.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 scale,
                 res_block,
                 num_blocks,
                 bn_use_global_stats,
                 **kwargs):
        super(MultiResidual, self).__init__(**kwargs)
        assert (num_blocks >= 1)
        self.scale = scale
        self.num_blocks = num_blocks
        with self.name_scope():
            # Branches are registered as attributes res_block1..res_block{num_blocks}.
            for i in range(num_blocks):
                setattr(self, "res_block{}".format(i + 1), res_block(bn_use_global_stats=bn_use_global_stats))
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        out = x
        # Every branch consumes the same input x; scaled outputs are summed onto the identity,
        # and ReLU is applied once after the summation.
        for i in range(self.num_blocks):
            res_block = getattr(self, "res_block{}".format(i + 1))
            out = out + self.scale * res_block(x)
        out = self.activ(out)
        return out
class PolyResidual(HybridBlock):
    """
    The other base class for constructing N-way poly-modules. Actually it is for 3-way poly-modules.
    Parameters:
    ----------
    scale : float
        Scale value for each residual branch.
    res_block : HybridBlock class
        Residual branch block.
    num_blocks : int
        Number of residual branches.
    pre_block : HybridBlock class
        Preliminary block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 scale,
                 res_block,
                 num_blocks,
                 pre_block,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyResidual, self).__init__(**kwargs)
        assert (num_blocks >= 1)
        self.scale = scale
        self.num_blocks = num_blocks
        with self.name_scope():
            # Single pre-block shared by all branches; it holds num_blocks BatchNorm sets.
            self.pre_block = pre_block(
                bn_use_global_stats=bn_use_global_stats,
                num_blocks=num_blocks)
            for i in range(num_blocks):
                setattr(self, "res_block{}".format(i + 1), res_block(bn_use_global_stats=bn_use_global_stats))
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        out = x
        # Unlike MultiResidual, the branches are chained: each branch output (after ReLU)
        # becomes the input of the next pre-block iteration.
        for index in range(self.num_blocks):
            # `index` selects the per-branch BatchNorm set inside the shared pre-block.
            x = self.pre_block(x, index)
            res_block = getattr(self, "res_block{}".format(index + 1))
            x = res_block(x)
            out = out + self.scale * x
            x = self.activ(x)
        out = self.activ(out)
        return out
class PolyBaseUnit(HybridBlock):
    """
    PolyNet unit base class.
    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    two_way_block : HybridBlock class
        Residual branch block for 2-way-stage.
    poly_scale : float, default 0.0
        Scale value for poly-stage.
    poly_res_block : HybridBlock class, default None
        Residual branch block for poly-stage.
    poly_pre_block : HybridBlock class, default None
        Preliminary branch block for poly-stage.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 two_way_scale,
                 two_way_block,
                 poly_scale=0.0,
                 poly_res_block=None,
                 poly_pre_block=None,
                 bn_use_global_stats=False,
                 **kwargs):
        super(PolyBaseUnit, self).__init__(**kwargs)
        with self.name_scope():
            if poly_res_block is not None:
                # With a poly residual block, both a non-zero scale and a pre-block are required.
                assert (poly_scale != 0.0)
                assert (poly_pre_block is not None)
                self.poly = PolyResidual(
                    scale=poly_scale,
                    res_block=poly_res_block,
                    num_blocks=3,
                    pre_block=poly_pre_block,
                    bn_use_global_stats=bn_use_global_stats)
            else:
                assert (poly_scale == 0.0)
                assert (poly_pre_block is None)
                self.poly = None
            self.twoway = MultiResidual(
                scale=two_way_scale,
                res_block=two_way_block,
                num_blocks=2,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        # Optional poly-stage runs first, then the 2-way residual stage.
        if self.poly is not None:
            x = self.poly(x)
        x = self.twoway(x)
        return x
class PolyAUnit(PolyBaseUnit):
    """
    PolyNet type A unit.
    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    poly_scale : float, default 0.0
        Scale value for poly-stage (must stay 0.0: type A units have no poly-stage).
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 two_way_scale,
                 poly_scale=0.0,
                 bn_use_global_stats=False,
                 **kwargs):
        super(PolyAUnit, self).__init__(
            two_way_scale=two_way_scale,
            two_way_block=TwoWayABlock,
            bn_use_global_stats=bn_use_global_stats,
            **kwargs)
        # poly_scale is accepted only for interface uniformity with B/C units.
        assert (poly_scale == 0.0)
class PolyBUnit(PolyBaseUnit):
    """
    PolyNet type B unit.
    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    poly_scale : float
        Scale value for poly-stage.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 two_way_scale,
                 poly_scale,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyBUnit, self).__init__(
            two_way_scale=two_way_scale,
            two_way_block=TwoWayBBlock,
            poly_scale=poly_scale,
            poly_res_block=poly_res_b_block,
            poly_pre_block=PolyPreBBlock,
            bn_use_global_stats=bn_use_global_stats,
            **kwargs)
class PolyCUnit(PolyBaseUnit):
    """
    PolyNet type C unit.
    Parameters:
    ----------
    two_way_scale : float
        Scale value for 2-way stage.
    poly_scale : float
        Scale value for poly-stage.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 two_way_scale,
                 poly_scale,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyCUnit, self).__init__(
            two_way_scale=two_way_scale,
            two_way_block=TwoWayCBlock,
            poly_scale=poly_scale,
            poly_res_block=poly_res_c_block,
            poly_pre_block=PolyPreCBlock,
            bn_use_global_stats=bn_use_global_stats,
            **kwargs)
class ReductionAUnit(HybridBlock):
    """
    PolyNet type Reduction-A unit.
    Halves the spatial resolution (every branch ends with stride 2) and widens the trunk:
    concatenated output is 384 + 384 + 384 = 1152 channels.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        in_channels = 384
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(256, 256, 384),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(384,),
                kernel_size_list=(3,),
                strides_list=(2,),
                padding_list=(0,),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class ReductionBUnit(HybridBlock):
    """
    PolyNet type Reduction-B unit.
    Halves the spatial resolution (every branch ends with stride 2) and widens the trunk:
    concatenated output is 256 + 256 + 384 + 1152 = 2048 channels.
    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        in_channels = 1152
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(256, 256, 256),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(256, 256),
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(256, 384),
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class PolyBlock3a(HybridBlock):
    """
    PolyNet type Mixed-3a block.

    Concatenates a max-pooling branch with a 3x3 convolution branch along the
    channel axis.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyBlock3a, self).__init__(**kwargs)
        with self.name_scope():
            # Branch outputs are concatenated along the channel axis (axis=1).
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(MaxPoolBranch())
            self.branches.add(Conv3x3Branch(
                in_channels=64,
                out_channels=96,
                bn_use_global_stats=bn_use_global_stats))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class PolyBlock4a(HybridBlock):
    """
    PolyNet type Mixed-4a block.

    Concatenates two convolution branches along the channel axis; the second
    branch factorizes a 7x7 receptive field into 7x1 and 1x7 convolutions.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyBlock4a, self).__init__(**kwargs)
        with self.name_scope():
            # Branch outputs are concatenated along the channel axis (axis=1).
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=160,
                out_channels_list=(64, 96),
                kernel_size_list=(1, 3),
                strides_list=(1, 1),
                padding_list=(0, 0),
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=160,
                out_channels_list=(64, 64, 64, 96),
                kernel_size_list=(1, (7, 1), (1, 7), 3),
                strides_list=(1, 1, 1, 1),
                padding_list=(0, (3, 0), (0, 3), 0),
                bn_use_global_stats=bn_use_global_stats))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class PolyBlock5a(HybridBlock):
    """
    PolyNet type Mixed-5a block.

    Concatenates a max-pooling branch with a 3x3 convolution branch along the
    channel axis.

    Parameters:
    ----------
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyBlock5a, self).__init__(**kwargs)
        with self.name_scope():
            # Branch outputs are concatenated along the channel axis (axis=1).
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(MaxPoolBranch())
            self.branches.add(Conv3x3Branch(
                in_channels=192,
                out_channels=192,
                bn_use_global_stats=bn_use_global_stats))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class PolyInitBlock(HybridBlock):
    """
    PolyNet specific initial block.

    A stem of three 3x3 convolutions followed by the Mixed-3a/4a/5a blocks.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(PolyInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Stride-2, unpadded stem convolution halves the spatial size.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=32,
                strides=2,
                padding=0,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=32,
                out_channels=32,
                padding=0,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=32,
                out_channels=64,
                bn_use_global_stats=bn_use_global_stats)
            self.block1 = PolyBlock3a(bn_use_global_stats=bn_use_global_stats)
            self.block2 = PolyBlock4a(bn_use_global_stats=bn_use_global_stats)
            self.block3 = PolyBlock5a(bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        return x
class PolyNet(HybridBlock):
    """
    PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
    https://arxiv.org/abs/1611.05725.

    Parameters:
    ----------
    two_way_scales : list of list of floats
        Two way scale values for each normal unit.
    poly_scales : list of list of floats
        Three way scale values for each normal unit.
    dropout_rate : float, default 0.2
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (331, 331)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 two_way_scales,
                 poly_scales,
                 dropout_rate=0.2,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(331, 331),
                 classes=1000,
                 **kwargs):
        super(PolyNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Normal and reduction unit types indexed by stage.
        normal_units = [PolyAUnit, PolyBUnit, PolyCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(PolyInitBlock(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # The per-stage scale lists also determine the number of units per stage.
            for i, (two_way_scales_per_stage, poly_scales_per_stage) in enumerate(zip(two_way_scales, poly_scales)):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, (two_way_scale, poly_scale) in enumerate(zip(two_way_scales_per_stage, poly_scales_per_stage)):
                        # The first unit of every stage after the first is a
                        # reduction unit; its scale entries are unused.
                        if (j == 0) and (i != 0):
                            unit = reduction_units[i - 1]
                            stage.add(unit(bn_use_global_stats=bn_use_global_stats))
                        else:
                            unit = normal_units[i]
                            stage.add(unit(
                                two_way_scale=two_way_scale,
                                poly_scale=poly_scale,
                                bn_use_global_stats=bn_use_global_stats))
                self.features.add(stage)
            # Final pooling; pool_size=9 assumes the default 331x331 input size.
            self.features.add(nn.AvgPool2D(
                pool_size=9,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dropout(rate=dropout_rate))
            # Feature width entering the classifier is fixed at 2048.
            self.output.add(nn.Dense(
                units=classes,
                in_units=2048))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_polynet(model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create PolyNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage residual scale tables for the normal units; the leading 0.0
    # entries belong to positions occupied by reduction units, which ignore them.
    two_way_scales = [
        [1.000000, 0.992308, 0.984615, 0.976923, 0.969231, 0.961538, 0.953846, 0.946154, 0.938462, 0.930769],
        [0.000000, 0.915385, 0.900000, 0.884615, 0.869231, 0.853846, 0.838462, 0.823077, 0.807692, 0.792308, 0.776923],
        [0.000000, 0.761538, 0.746154, 0.730769, 0.715385, 0.700000]]
    poly_scales = [
        [0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000, 0.000000],
        [0.000000, 0.923077, 0.907692, 0.892308, 0.876923, 0.861538, 0.846154, 0.830769, 0.815385, 0.800000, 0.784615],
        [0.000000, 0.769231, 0.753846, 0.738462, 0.723077, 0.707692]]
    model = PolyNet(two_way_scales=two_way_scales, poly_scales=poly_scales, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(model_name=model_name, local_model_store_dir_path=root)
        model.load_parameters(filename=weights_file, ctx=ctx)
    return model
def polynet(**kwargs):
    """
    PolyNet model from 'PolyNet: A Pursuit of Structural Diversity in Very Deep Networks,'
    https://arxiv.org/abs/1611.05725.

    All keyword arguments (`pretrained`, `ctx`, `root`, ...) are forwarded to
    `get_polynet`.
    """
    return get_polynet(model_name="polynet", **kwargs)
def _test():
    """Smoke-test the PolyNet constructor: parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    for model in (polynet,):
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != polynet or weight_count == 95366600)
        y = net(mx.nd.zeros((1, 3, 331, 331), ctx=ctx))
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 38,413 | 32.52007 | 122 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnet_cifar.py | """
ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet20_svhn',
'resnet56_cifar10', 'resnet56_cifar100', 'resnet56_svhn',
'resnet110_cifar10', 'resnet110_cifar100', 'resnet110_svhn',
'resnet164bn_cifar10', 'resnet164bn_cifar100', 'resnet164bn_svhn',
'resnet272bn_cifar10', 'resnet272bn_cifar100', 'resnet272bn_svhn',
'resnet542bn_cifar10', 'resnet542bn_cifar100', 'resnet542bn_svhn',
'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1001_svhn',
'resnet1202_cifar10', 'resnet1202_cifar100', 'resnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .resnet import ResUnit
class CIFARResNet(HybridBlock):
    """
    ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Simple 3x3 stem (no downsampling), unlike the ImageNet variant.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the start of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False))
                        in_channels = out_channels
                self.features.add(stage)
            # 8x8 pooling: 32x32 input halved twice by the two striding stages.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_resnet_cifar(classes,
                     blocks,
                     bottleneck,
                     model_name=None,
                     pretrained=False,
                     ctx=cpu(),
                     root=os.path.join("~", ".mxnet", "models"),
                     **kwargs):
    """
    Create ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Each bottleneck unit holds 3 conv layers, each simple unit 2; depth is
    # split evenly across the 3 stages (plus 2 layers for stem/classifier).
    layers_per_unit = 9 if bottleneck else 6
    assert ((blocks - 2) % layers_per_unit == 0)
    layers = [(blocks - 2) // layers_per_unit] * 3
    channels_per_layers = [16, 32, 64]
    init_block_channels = 16
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units expand the output width by a factor of 4.
        channels = [[4 * cij for cij in ci] for ci in channels]
    net = CIFARResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(model_name=model_name, local_model_store_dir_path=root),
            ctx=ctx)
    return net
def resnet20_cifar10(classes=10, **kwargs):
    """
    ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet20_cifar10", classes=classes, blocks=20, bottleneck=False, **kwargs)
def resnet20_cifar100(classes=100, **kwargs):
    """
    ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet20_cifar100", classes=classes, blocks=20, bottleneck=False, **kwargs)
def resnet20_svhn(classes=10, **kwargs):
    """
    ResNet-20 model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet20_svhn", classes=classes, blocks=20, bottleneck=False, **kwargs)
def resnet56_cifar10(classes=10, **kwargs):
    """
    ResNet-56 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet56_cifar10", classes=classes, blocks=56, bottleneck=False, **kwargs)
def resnet56_cifar100(classes=100, **kwargs):
    """
    ResNet-56 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet56_cifar100", classes=classes, blocks=56, bottleneck=False, **kwargs)
def resnet56_svhn(classes=10, **kwargs):
    """
    ResNet-56 model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet56_svhn", classes=classes, blocks=56, bottleneck=False, **kwargs)
def resnet110_cifar10(classes=10, **kwargs):
    """
    ResNet-110 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet110_cifar10", classes=classes, blocks=110, bottleneck=False, **kwargs)
def resnet110_cifar100(classes=100, **kwargs):
    """
    ResNet-110 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet110_cifar100", classes=classes, blocks=110, bottleneck=False, **kwargs)
def resnet110_svhn(classes=10, **kwargs):
    """
    ResNet-110 model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet110_svhn", classes=classes, blocks=110, bottleneck=False, **kwargs)
def resnet164bn_cifar10(classes=10, **kwargs):
    """
    ResNet-164(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet164bn_cifar10", classes=classes, blocks=164, bottleneck=True, **kwargs)
def resnet164bn_cifar100(classes=100, **kwargs):
    """
    ResNet-164(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet164bn_cifar100", classes=classes, blocks=164, bottleneck=True, **kwargs)
def resnet164bn_svhn(classes=10, **kwargs):
    """
    ResNet-164(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet164bn_svhn", classes=classes, blocks=164, bottleneck=True, **kwargs)
def resnet272bn_cifar10(classes=10, **kwargs):
    """
    ResNet-272(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet272bn_cifar10", classes=classes, blocks=272, bottleneck=True, **kwargs)
def resnet272bn_cifar100(classes=100, **kwargs):
    """
    ResNet-272(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet272bn_cifar100", classes=classes, blocks=272, bottleneck=True, **kwargs)
def resnet272bn_svhn(classes=10, **kwargs):
    """
    ResNet-272(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet272bn_svhn", classes=classes, blocks=272, bottleneck=True, **kwargs)
def resnet542bn_cifar10(classes=10, **kwargs):
    """
    ResNet-542(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet542bn_cifar10", classes=classes, blocks=542, bottleneck=True, **kwargs)
def resnet542bn_cifar100(classes=100, **kwargs):
    """
    ResNet-542(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet542bn_cifar100", classes=classes, blocks=542, bottleneck=True, **kwargs)
def resnet542bn_svhn(classes=10, **kwargs):
    """
    ResNet-542(BN) model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Docstring previously claimed "ResNet-272(BN)" (copy-paste error); this
    # function builds the 542-block variant, matching blocks=542 below.
    return get_resnet_cifar(classes=classes, blocks=542, bottleneck=True, model_name="resnet542bn_svhn", **kwargs)
def resnet1001_cifar10(classes=10, **kwargs):
    """
    ResNet-1001 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet1001_cifar10", classes=classes, blocks=1001, bottleneck=True, **kwargs)
def resnet1001_cifar100(classes=100, **kwargs):
    """
    ResNet-1001 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet1001_cifar100", classes=classes, blocks=1001, bottleneck=True, **kwargs)
def resnet1001_svhn(classes=10, **kwargs):
    """
    ResNet-1001 model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet1001_svhn", classes=classes, blocks=1001, bottleneck=True, **kwargs)
def resnet1202_cifar10(classes=10, **kwargs):
    """
    ResNet-1202 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet1202_cifar10", classes=classes, blocks=1202, bottleneck=False, **kwargs)
def resnet1202_cifar100(classes=100, **kwargs):
    """
    ResNet-1202 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet1202_cifar100", classes=classes, blocks=1202, bottleneck=False, **kwargs)
def resnet1202_svhn(classes=10, **kwargs):
    """
    ResNet-1202 model for SVHN from 'Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    **kwargs
        See `get_resnet_cifar` (`pretrained`, `ctx`, `root`).
    """
    return get_resnet_cifar(model_name="resnet1202_svhn", classes=classes, blocks=1202, bottleneck=False, **kwargs)
def _test():
    """Smoke-test every CIFAR/SVHN ResNet constructor: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    # Expected trainable-parameter count per model constructor (insertion order
    # fixes the test order, matching the original list).
    expected = {
        resnet20_cifar10: 272474,
        resnet20_cifar100: 278324,
        resnet20_svhn: 272474,
        resnet56_cifar10: 855770,
        resnet56_cifar100: 861620,
        resnet56_svhn: 855770,
        resnet110_cifar10: 1730714,
        resnet110_cifar100: 1736564,
        resnet110_svhn: 1730714,
        resnet164bn_cifar10: 1704154,
        resnet164bn_cifar100: 1727284,
        resnet164bn_svhn: 1704154,
        resnet272bn_cifar10: 2816986,
        resnet272bn_cifar100: 2840116,
        resnet272bn_svhn: 2816986,
        resnet542bn_cifar10: 5599066,
        resnet542bn_cifar100: 5622196,
        resnet542bn_svhn: 5599066,
        resnet1001_cifar10: 10328602,
        resnet1001_cifar100: 10351732,
        resnet1001_svhn: 10328602,
        resnet1202_cifar10: 19424026,
        resnet1202_cifar100: 19429876,
        resnet1202_svhn: 19424026,
    }
    for model, count in expected.items():
        classes = 100 if model.__name__.endswith("cifar100") else 10
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only trainable parameters with a known shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == count)
        y = net(mx.nd.zeros((1, 3, 32, 32), ctx=ctx))
        assert (y.shape == (1, classes))


if __name__ == "__main__":
    _test()
| 25,521 | 36.09593 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/nasnet.py | """
NASNet-A for ImageNet-1K, implemented in Gluon.
Original paper: 'Learning Transferable Architectures for Scalable Image Recognition,'
https://arxiv.org/abs/1707.07012.
"""
__all__ = ['NASNet', 'nasnet_4a1056', 'nasnet_6a4032', 'nasnet_dual_path_sequential']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, DualPathSequential
class NasDualPathScheme(object):
    """
    NASNet specific scheme of dual path response for a block in a DualPathSequential module.

    Parameters:
    ----------
    can_skip_input : bool
        Whether can skip input for some blocks.
    """
    def __init__(self,
                 can_skip_input):
        super(NasDualPathScheme, self).__init__()
        self.can_skip_input = can_skip_input

    def __call__(self,
                 block,
                 x,
                 x_prev):
        """
        Scheme function.

        (This docstring previously sat as a stray bare string in the class body,
        where it documented nothing; it belongs on `__call__`.)

        Parameters:
        ----------
        block : nn.HybridBlock
            A block.
        x : Tensor
            Current processed tensor.
        x_prev : Tensor
            Previous processed tensor.

        Returns:
        -------
        x_next : Tensor
            Next processed tensor.
        x : Tensor
            Current processed tensor.
        """
        x_next = block(x, x_prev)
        # Dual-output blocks return (next, current); unpack both. isinstance
        # (rather than `type(...) == tuple`) also accepts tuple subclasses.
        if isinstance(x_next, tuple):
            x_next, x = x_next
        # Blocks flagged with `skip_input` pass the previous tensor through.
        if self.can_skip_input and hasattr(block, 'skip_input') and block.skip_input:
            x = x_prev
        return x_next, x
def nasnet_dual_path_scheme_ordinal(block,
                                    x,
                                    _):
    """
    NASNet specific scheme of dual path response for an ordinal block with dual inputs/outputs in a DualPathSequential
    block.

    Applies `block` to the current tensor only; the previous-tensor slot is
    ignored and the input is echoed back as the second element of the pair.

    Parameters:
    ----------
    block : nn.HybridBlock
        A block.
    x : Tensor
        Current processed tensor.

    Returns:
    -------
    x_next : Tensor
        Next processed tensor.
    x : Tensor
        Current processed tensor.
    """
    x_next = block(x)
    return x_next, x
def nasnet_dual_path_sequential(return_two=True,
                                first_ordinals=0,
                                last_ordinals=0,
                                can_skip_input=False,
                                **kwargs):
    """
    NASNet specific dual path sequential container.

    Builds a `DualPathSequential` wired with the NASNet dual-path schemes.

    Parameters:
    ----------
    return_two : bool, default True
        Whether to return two output after execution.
    first_ordinals : int, default 0
        Number of the first blocks with single input/output.
    last_ordinals : int, default 0
        Number of the final blocks with single input/output.
    can_skip_input : bool, default False
        Whether can skip input for some blocks.
    """
    scheme = NasDualPathScheme(can_skip_input=can_skip_input)
    return DualPathSequential(
        return_two=return_two,
        first_ordinals=first_ordinals,
        last_ordinals=last_ordinals,
        dual_path_scheme=scheme,
        dual_path_scheme_ordinal=nasnet_dual_path_scheme_ordinal,
        **kwargs)
def nasnet_batch_norm(channels):
    """
    Create a Batch normalization layer with the NASNet specific
    hyperparameters (momentum 0.1, epsilon 0.001).

    Parameters:
    ----------
    channels : int
        Number of channels in input data.
    """
    return nn.BatchNorm(in_channels=channels, momentum=0.1, epsilon=0.001)
def nasnet_avgpool1x1_s2():
    """
    Create the NASNet specific 1x1 Average pooling layer with stride 2.
    """
    # pool_size=1 with stride 2 acts as spatial subsampling without mixing.
    return nn.AvgPool2D(pool_size=1, strides=2, count_include_pad=False)
def nasnet_avgpool3x3_s1():
    """
    Create the NASNet specific 3x3 Average pooling layer with stride 1.
    """
    return nn.AvgPool2D(pool_size=3, strides=1, padding=1, count_include_pad=False)
def nasnet_avgpool3x3_s2():
    """
    Create the NASNet specific 3x3 Average pooling layer with stride 2.
    """
    return nn.AvgPool2D(pool_size=3, strides=2, padding=1, count_include_pad=False)
def process_with_padding(x,
                         F,
                         process=(lambda x: x),
                         pad_width=(0, 0, 0, 0, 1, 0, 1, 0)):
    """
    Auxiliary decorator for a layer with NASNet specific extra padding:
    pad, apply the wrapped layer, then crop the first spatial row/column off.

    Parameters:
    ----------
    x : NDArray
        Input tensor.
    F : module
        Gluon API module.
    process : function, default (lambda x: x)
        A decorated layer.
    pad_width : tuple of int, default (0, 0, 0, 0, 1, 0, 1, 0)
        Padding widths per dimension edge, in `F.pad` layout
        (default pads one row on top and one column on the left).

    Returns:
    -------
    NDArray
        Resulted tensor.
    """
    x = F.pad(x, mode="constant", pad_width=pad_width, constant_value=0)
    x = process(x)
    # Drop the first row and column that the padding introduced.
    x = F.slice(x, begin=(None, None, 1, 1), end=(None, None, None, None))
    return x
class NasMaxPoolBlock(HybridBlock):
    """
    NASNet specific 3x3/2 Max pooling layer, optionally wrapped with the
    NASNet extra-padding scheme.

    Parameters:
    ----------
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 extra_padding=False,
                 **kwargs):
        super(NasMaxPoolBlock, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        with self.name_scope():
            self.pool = nn.MaxPool2D(pool_size=3, strides=2, padding=1)

    def hybrid_forward(self, F, x):
        if not self.extra_padding:
            return self.pool(x)
        return process_with_padding(x, F, self.pool)
class NasAvgPoolBlock(HybridBlock):
    """
    NASNet specific 3x3/2 Average pooling layer, optionally wrapped with the
    NASNet extra-padding scheme.

    Parameters:
    ----------
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 extra_padding=False,
                 **kwargs):
        super(NasAvgPoolBlock, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        with self.name_scope():
            self.pool = nn.AvgPool2D(
                pool_size=3,
                strides=2,
                padding=1,
                count_include_pad=False)

    def hybrid_forward(self, F, x):
        if not self.extra_padding:
            return self.pool(x)
        return process_with_padding(x, F, self.pool)
class NasConv(HybridBlock):
    """
    NASNet specific convolution block: ReLU -> Conv -> BatchNorm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 groups,
                 **kwargs):
        super(NasConv, self).__init__(**kwargs)
        with self.name_scope():
            self.activ = nn.Activation("relu")
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                groups=groups,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nasnet_batch_norm(channels=out_channels)

    def hybrid_forward(self, F, x):
        # Pre-activation ordering: ReLU precedes the convolution.
        return self.bn(self.conv(self.activ(x)))
def nas_conv1x1(in_channels,
                out_channels):
    """
    1x1 version of the NASNet specific convolution block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    return NasConv(
        in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, strides=1, padding=0, groups=1)
class DwsConv(HybridBlock):
    """
    Standard depthwise separable convolution block: a depthwise convolution
    followed by a pointwise (1x1) convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layers use a bias vector.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 use_bias=False,
                 **kwargs):
        super(DwsConv, self).__init__(**kwargs)
        with self.name_scope():
            # groups == in_channels makes this a depthwise convolution.
            self.dw_conv = nn.Conv2D(
                channels=in_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                groups=in_channels,
                use_bias=use_bias,
                in_channels=in_channels)
            self.pw_conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias)

    def hybrid_forward(self, F, x):
        return self.pw_conv(self.dw_conv(x))
class NasDwsConv(HybridBlock):
    """
    NASNet specific depthwise separable convolution block:
    ReLU -> (optionally extra-padded) DwsConv -> BatchNorm.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 extra_padding=False,
                 **kwargs):
        super(NasDwsConv, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        with self.name_scope():
            self.activ = nn.Activation(activation="relu")
            self.conv = DwsConv(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                use_bias=False)
            self.bn = nasnet_batch_norm(channels=out_channels)

    def hybrid_forward(self, F, x):
        x = self.activ(x)
        if self.extra_padding:
            x = process_with_padding(x, F, self.conv)
        else:
            x = self.conv(x)
        return self.bn(x)
class DwsBranch(HybridBlock):
    """
    NASNet specific branch of two stacked depthwise separable convolution
    layers; only the first one carries the stride (and extra padding).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether to use squeeze reduction if False.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 extra_padding=False,
                 stem=False,
                 **kwargs):
        super(DwsBranch, self).__init__(**kwargs)
        # Extra padding is only supported in the non-stem configuration.
        assert (not stem) or (not extra_padding)
        mid_channels = out_channels if stem else in_channels
        with self.name_scope():
            self.conv1 = NasDwsConv(
                in_channels=in_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=padding,
                extra_padding=extra_padding)
            self.conv2 = NasDwsConv(
                in_channels=mid_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                strides=1,
                padding=padding)

    def hybrid_forward(self, F, x):
        return self.conv2(self.conv1(x))
def dws_branch_k3_s1_p1(in_channels,
                        out_channels,
                        extra_padding=False):
    """
    3x3/1/1 version of the NASNet specific depthwise separable convolution
    branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    return DwsBranch(
        in_channels=in_channels, out_channels=out_channels,
        kernel_size=3, strides=1, padding=1,
        extra_padding=extra_padding)
def dws_branch_k5_s1_p2(in_channels,
                        out_channels,
                        extra_padding=False):
    """
    5x5/1/2 version of the NASNet specific depthwise separable convolution
    branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    return DwsBranch(
        in_channels=in_channels, out_channels=out_channels,
        kernel_size=5, strides=1, padding=2,
        extra_padding=extra_padding)
def dws_branch_k5_s2_p2(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False):
    """
    5x5/2/2 version of the NASNet specific depthwise separable convolution
    branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether to use squeeze reduction if False.
    """
    return DwsBranch(
        in_channels=in_channels, out_channels=out_channels,
        kernel_size=5, strides=2, padding=2,
        extra_padding=extra_padding, stem=stem)
def dws_branch_k7_s2_p3(in_channels,
                        out_channels,
                        extra_padding=False,
                        stem=False):
    """
    7x7/2/3 version of the NASNet specific depthwise separable convolution
    branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    stem : bool, default False
        Whether to use squeeze reduction if False.
    """
    return DwsBranch(
        in_channels=in_channels, out_channels=out_channels,
        kernel_size=7, strides=2, padding=3,
        extra_padding=extra_padding, stem=stem)
class NasPathBranch(HybridBlock):
    """
    NASNet specific `path` branch (auxiliary block): strided 1x1 average
    pooling followed by a 1x1 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default False
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 extra_padding=False,
                 **kwargs):
        super(NasPathBranch, self).__init__(**kwargs)
        self.extra_padding = extra_padding
        with self.name_scope():
            self.avgpool = nasnet_avgpool1x1_s2()
            self.conv = conv1x1(
                in_channels=in_channels,
                out_channels=out_channels)

    def hybrid_forward(self, F, x):
        if self.extra_padding:
            # Note the flipped pad widths: one row at the bottom and one
            # column at the right here.
            x = process_with_padding(x, F, pad_width=(0, 0, 0, 0, 0, 1, 0, 1))
        return self.conv(self.avgpool(x))
class NasPathBlock(HybridBlock):
    """
    NASNet specific `path` block: two half-width path branches whose outputs
    are concatenated back to `out_channels` and batch-normalized.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(NasPathBlock, self).__init__(**kwargs)
        # Each branch produces half of the output channels.
        mid_channels = out_channels // 2
        with self.name_scope():
            self.activ = nn.Activation("relu")
            self.path1 = NasPathBranch(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.path2 = NasPathBranch(
                in_channels=in_channels,
                out_channels=mid_channels,
                extra_padding=True)
            self.bn = nasnet_batch_norm(channels=out_channels)

    def hybrid_forward(self, F, x):
        x = self.activ(x)
        merged = F.concat(self.path1(x), self.path2(x), dim=1)
        return self.bn(merged)
class Stem1Unit(HybridBlock):
    """
    NASNet Stem1 unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(Stem1Unit, self).__init__(**kwargs)
        # Four combination outputs (x1..x4) are concatenated below, so each
        # branch carries a quarter of the output channels.
        mid_channels = out_channels // 4
        with self.name_scope():
            self.conv1x1 = nas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            # Left operands consume the 1x1-projected tensor; right operands
            # marked `stem=True` consume the raw input.
            self.comb0_left = dws_branch_k5_s2_p2(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb0_right = dws_branch_k7_s2_p3(
                in_channels=in_channels,
                out_channels=mid_channels,
                stem=True)
            self.comb1_left = NasMaxPoolBlock(extra_padding=False)
            self.comb1_right = dws_branch_k7_s2_p3(
                in_channels=in_channels,
                out_channels=mid_channels,
                stem=True)
            self.comb2_left = nasnet_avgpool3x3_s2()
            self.comb2_right = dws_branch_k5_s2_p2(
                in_channels=in_channels,
                out_channels=mid_channels,
                stem=True)
            self.comb3_right = nasnet_avgpool3x3_s1()
            self.comb4_left = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb4_right = NasMaxPoolBlock(extra_padding=False)

    def hybrid_forward(self, F, x, _=None):
        # Second (previous-path) argument exists for dual-path compatibility
        # and is unused here.
        x_left = self.conv1x1(x)
        x_right = x
        x0 = self.comb0_left(x_left) + self.comb0_right(x_right)
        x1 = self.comb1_left(x_left) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_left) + self.comb2_right(x_right)
        x3 = x1 + self.comb3_right(x0)
        x4 = self.comb4_left(x0) + self.comb4_right(x_left)
        # x0 feeds x3/x4 but is not part of the output concat.
        x_out = F.concat(x1, x2, x3, x4, dim=1)
        return x_out
class Stem2Unit(HybridBlock):
    """
    NASNet Stem2 unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 extra_padding,
                 **kwargs):
        super(Stem2Unit, self).__init__(**kwargs)
        # Four combination outputs are concatenated, one quarter each.
        mid_channels = out_channels // 4
        with self.name_scope():
            self.conv1x1 = nas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            # Auxiliary `path` block adapts the previous-cell tensor to the
            # branch width/resolution.
            self.path = NasPathBlock(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
            self.comb0_left = dws_branch_k5_s2_p2(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb0_right = dws_branch_k7_s2_p3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb1_left = NasMaxPoolBlock(extra_padding=extra_padding)
            self.comb1_right = dws_branch_k7_s2_p3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb2_left = NasAvgPoolBlock(extra_padding=extra_padding)
            self.comb2_right = dws_branch_k5_s2_p2(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb3_right = nasnet_avgpool3x3_s1()
            self.comb4_left = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb4_right = NasMaxPoolBlock(extra_padding=extra_padding)

    def hybrid_forward(self, F, x, x_prev):
        x_left = self.conv1x1(x)
        x_right = self.path(x_prev)
        x0 = self.comb0_left(x_left) + self.comb0_right(x_right)
        x1 = self.comb1_left(x_left) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_left) + self.comb2_right(x_right)
        x3 = x1 + self.comb3_right(x0)
        x4 = self.comb4_left(x0) + self.comb4_right(x_left)
        # x0 feeds x3/x4 but is not part of the output concat.
        x_out = F.concat(x1, x2, x3, x4, dim=1)
        return x_out
class FirstUnit(HybridBlock):
    """
    NASNet First unit (the first normal cell of a stage; adapts the previous
    tensor through a `path` block instead of a plain 1x1 convolution).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 **kwargs):
        super(FirstUnit, self).__init__(**kwargs)
        # Six tensors (x_right, x0..x4) are concatenated below, so each
        # carries one sixth of the output channels.
        mid_channels = out_channels // 6
        with self.name_scope():
            self.conv1x1 = nas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.path = NasPathBlock(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
            self.comb0_left = dws_branch_k5_s1_p2(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb0_right = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb1_left = dws_branch_k5_s1_p2(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb1_right = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb2_left = nasnet_avgpool3x3_s1()
            self.comb3_left = nasnet_avgpool3x3_s1()
            self.comb3_right = nasnet_avgpool3x3_s1()
            self.comb4_left = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)

    def hybrid_forward(self, F, x, x_prev):
        x_left = self.conv1x1(x)
        x_right = self.path(x_prev)
        x0 = self.comb0_left(x_left) + self.comb0_right(x_right)
        # Both comb1 operands deliberately consume x_right.
        x1 = self.comb1_left(x_right) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_left) + x_right
        x3 = self.comb3_left(x_right) + self.comb3_right(x_right)
        x4 = self.comb4_left(x_left) + x_left
        x_out = F.concat(x_right, x0, x1, x2, x3, x4, dim=1)
        return x_out
class NormalUnit(HybridBlock):
    """
    NASNet Normal unit (same cell wiring as FirstUnit, but the previous
    tensor is adapted by a plain 1x1 convolution instead of a path block).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 **kwargs):
        super(NormalUnit, self).__init__(**kwargs)
        # Six tensors (x_right, x0..x4) are concatenated below, so each
        # carries one sixth of the output channels.
        mid_channels = out_channels // 6
        with self.name_scope():
            self.conv1x1_prev = nas_conv1x1(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
            self.conv1x1 = nas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.comb0_left = dws_branch_k5_s1_p2(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb0_right = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb1_left = dws_branch_k5_s1_p2(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb1_right = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.comb2_left = nasnet_avgpool3x3_s1()
            self.comb3_left = nasnet_avgpool3x3_s1()
            self.comb3_right = nasnet_avgpool3x3_s1()
            self.comb4_left = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels)

    def hybrid_forward(self, F, x, x_prev):
        x_left = self.conv1x1(x)
        x_right = self.conv1x1_prev(x_prev)
        x0 = self.comb0_left(x_left) + self.comb0_right(x_right)
        # Both comb1 operands deliberately consume x_right.
        x1 = self.comb1_left(x_right) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_left) + x_right
        x3 = self.comb3_left(x_right) + self.comb3_right(x_right)
        x4 = self.comb4_left(x_left) + x_left
        x_out = F.concat(x_right, x0, x1, x2, x3, x4, dim=1)
        return x_out
class ReductionBaseUnit(HybridBlock):
    """
    NASNet Reduction base unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool, default True
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 extra_padding=True,
                 **kwargs):
        super(ReductionBaseUnit, self).__init__(**kwargs)
        # Consumed by NasDualPathScheme: marks this unit so the dual-path
        # container may pass the previous input forward instead of this
        # unit's input.
        self.skip_input = True
        # Four combination outputs are concatenated, one quarter each.
        mid_channels = out_channels // 4
        with self.name_scope():
            self.conv1x1_prev = nas_conv1x1(
                in_channels=prev_in_channels,
                out_channels=mid_channels)
            self.conv1x1 = nas_conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.comb0_left = dws_branch_k5_s2_p2(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb0_right = dws_branch_k7_s2_p3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb1_left = NasMaxPoolBlock(extra_padding=extra_padding)
            self.comb1_right = dws_branch_k7_s2_p3(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb2_left = NasAvgPoolBlock(extra_padding=extra_padding)
            self.comb2_right = dws_branch_k5_s2_p2(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb3_right = nasnet_avgpool3x3_s1()
            self.comb4_left = dws_branch_k3_s1_p1(
                in_channels=mid_channels,
                out_channels=mid_channels,
                extra_padding=extra_padding)
            self.comb4_right = NasMaxPoolBlock(extra_padding=extra_padding)

    def hybrid_forward(self, F, x, x_prev):
        x_left = self.conv1x1(x)
        x_right = self.conv1x1_prev(x_prev)
        x0 = self.comb0_left(x_left) + self.comb0_right(x_right)
        x1 = self.comb1_left(x_left) + self.comb1_right(x_right)
        x2 = self.comb2_left(x_left) + self.comb2_right(x_right)
        x3 = x1 + self.comb3_right(x0)
        x4 = self.comb4_left(x0) + self.comb4_right(x_left)
        # x0 feeds x3/x4 but is not part of the output concat.
        x_out = F.concat(x1, x2, x3, x4, dim=1)
        return x_out
class Reduction1Unit(ReductionBaseUnit):
    """
    NASNet Reduction1 unit (always uses extra padding).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels):
        super(Reduction1Unit, self).__init__(
            in_channels=in_channels,
            prev_in_channels=prev_in_channels,
            out_channels=out_channels,
            extra_padding=True)
class Reduction2Unit(ReductionBaseUnit):
    """
    NASNet Reduction2 unit (extra padding is configurable).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    prev_in_channels : int
        Number of input channels in previous input.
    out_channels : int
        Number of output channels.
    extra_padding : bool
        Whether to use extra padding.
    """
    def __init__(self,
                 in_channels,
                 prev_in_channels,
                 out_channels,
                 extra_padding):
        super(Reduction2Unit, self).__init__(
            in_channels=in_channels,
            prev_in_channels=prev_in_channels,
            out_channels=out_channels,
            extra_padding=extra_padding)
class NASNetInitBlock(HybridBlock):
    """
    NASNet specific initial block: an unpadded 3x3/2 convolution followed by
    NASNet-style batch normalization (no activation).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(NASNetInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = nn.Conv2D(
                channels=out_channels,
                kernel_size=3,
                strides=2,
                padding=0,
                use_bias=False,
                in_channels=in_channels)
            self.bn = nasnet_batch_norm(channels=out_channels)

    def hybrid_forward(self, F, x):
        return self.bn(self.conv(x))
class NASNet(HybridBlock):
    """
    NASNet-A model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    stem_blocks_channels : list of 2 int
        Number of output channels for the Stem units.
    final_pool_size : int
        Size of the pooling windows for final pool.
    extra_padding : bool
        Whether to use extra padding.
    skip_reduction_layer_input : bool
        Whether to skip the reduction layers when calculating the previous layer to connect to.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 stem_blocks_channels,
                 final_pool_size,
                 extra_padding,
                 skip_reduction_layer_input,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(NASNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        reduction_units = [Reduction1Unit, Reduction2Unit]

        with self.name_scope():
            # Dual-path container: the first block (init) and the last two
            # (activation + avgpool) use the single-tensor ordinal scheme.
            self.features = nasnet_dual_path_sequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=2,
                prefix="")
            self.features.add(NASNetInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels

            out_channels = stem_blocks_channels[0]
            self.features.add(Stem1Unit(
                in_channels=in_channels,
                out_channels=out_channels))
            # Track (prev_in_channels, in_channels) for the dual-path wiring.
            prev_in_channels = in_channels
            in_channels = out_channels

            out_channels = stem_blocks_channels[1]
            self.features.add(Stem2Unit(
                in_channels=in_channels,
                prev_in_channels=prev_in_channels,
                out_channels=out_channels,
                extra_padding=extra_padding))
            prev_in_channels = in_channels
            in_channels = out_channels

            for i, channels_per_stage in enumerate(channels):
                stage = nasnet_dual_path_sequential(
                    can_skip_input=skip_reduction_layer_input,
                    prefix='stage{}_'.format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Stage layout: stages 2 and 3 open with a reduction
                        # unit (Reduction1Unit / Reduction2Unit respectively)
                        # followed by a FirstUnit; stage 1 opens directly with
                        # a FirstUnit. Every remaining slot is a NormalUnit.
                        if (j == 0) and (i != 0):
                            unit = reduction_units[i - 1]
                        elif ((i == 0) and (j == 0)) or ((i != 0) and (j == 1)):
                            unit = FirstUnit
                        else:
                            unit = NormalUnit
                        if unit == Reduction2Unit:
                            # Reduction2Unit is the only stage unit that takes
                            # the extra_padding flag.
                            stage.add(Reduction2Unit(
                                in_channels=in_channels,
                                prev_in_channels=prev_in_channels,
                                out_channels=out_channels,
                                extra_padding=extra_padding))
                        else:
                            stage.add(unit(
                                in_channels=in_channels,
                                prev_in_channels=prev_in_channels,
                                out_channels=out_channels))
                        prev_in_channels = in_channels
                        in_channels = out_channels
                self.features.add(stage)

            self.features.add(nn.Activation("relu"))
            self.features.add(nn.AvgPool2D(
                pool_size=final_pool_size,
                strides=1))

            # Classifier head with dropout.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dropout(rate=0.5))
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_nasnet(repeat,
               penultimate_filters,
               init_block_channels,
               final_pool_size,
               extra_padding,
               skip_reduction_layer_input,
               in_size,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create NASNet-A model with specific parameters.

    Parameters:
    ----------
    repeat : int
        Number of cell repeats.
    penultimate_filters : int
        Number of filters in the penultimate layer of the network.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_pool_size : int
        Size of the pooling windows for final pool.
    extra_padding : bool
        Whether to use extra padding.
    skip_reduction_layer_input : bool
        Whether to skip the reduction layers when calculating the previous layer to connect to.
    in_size : tuple of two ints
        Spatial size of the expected input image.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Relative (unit-less) channel multipliers per stage; converted to
    # absolute widths below via `base_channel_chunk` so that the last stage's
    # last unit ends up with `penultimate_filters` channels.
    stem_blocks_channels = [1, 2]
    reduct_channels = [[], [8], [16]]
    norm_channels = [6, 12, 24]
    channels = [rci + [nci] * repeat for rci, nci in zip(reduct_channels, norm_channels)]
    base_channel_chunk = penultimate_filters // channels[-1][-1]
    stem_blocks_channels = [(ci * base_channel_chunk) for ci in stem_blocks_channels]
    channels = [[(cij * base_channel_chunk) for cij in ci] for ci in channels]
    net = NASNet(
        channels=channels,
        init_block_channels=init_block_channels,
        stem_blocks_channels=stem_blocks_channels,
        final_pool_size=final_pool_size,
        extra_padding=extra_padding,
        skip_reduction_layer_input=skip_reduction_layer_input,
        in_size=in_size,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def nasnet_4a1056(**kwargs):
    """
    NASNet-A 4@1056 (NASNet-A-Mobile) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_nasnet(
        repeat=4, penultimate_filters=1056, init_block_channels=32,
        final_pool_size=7, extra_padding=True,
        skip_reduction_layer_input=False, in_size=(224, 224),
        model_name="nasnet_4a1056", **kwargs)
def nasnet_6a4032(**kwargs):
    """
    NASNet-A 6@4032 (NASNet-A-Large) model from 'Learning Transferable Architectures for Scalable Image Recognition,'
    https://arxiv.org/abs/1707.07012.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_nasnet(
        repeat=6, penultimate_filters=4032, init_block_channels=96,
        final_pool_size=11, extra_padding=False,
        skip_reduction_layer_input=True, in_size=(331, 331),
        model_name="nasnet_6a4032", **kwargs)
def _test():
    """
    Smoke-test the NASNet variants: check trainable-parameter counts and the
    output shape of a forward pass on a zero input.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model in (nasnet_4a1056, nasnet_6a4032):
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count only differentiable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != nasnet_4a1056 or weight_count == 5289978)
        assert (model != nasnet_6a4032 or weight_count == 88753150)

        x = mx.nd.zeros((1, 3, net.in_size[0], net.in_size[1]), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when the module is executed directly.
if __name__ == "__main__":
    _test()
| 41,341 | 29.443299 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/resnext_cifar.py | """
ResNeXt for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['CIFARResNeXt', 'resnext20_1x64d_cifar10', 'resnext20_1x64d_cifar100', 'resnext20_1x64d_svhn',
'resnext20_2x32d_cifar10', 'resnext20_2x32d_cifar100', 'resnext20_2x32d_svhn',
'resnext20_2x64d_cifar10', 'resnext20_2x64d_cifar100', 'resnext20_2x64d_svhn',
'resnext20_4x16d_cifar10', 'resnext20_4x16d_cifar100', 'resnext20_4x16d_svhn',
'resnext20_4x32d_cifar10', 'resnext20_4x32d_cifar100', 'resnext20_4x32d_svhn',
'resnext20_8x8d_cifar10', 'resnext20_8x8d_cifar100', 'resnext20_8x8d_svhn',
'resnext20_8x16d_cifar10', 'resnext20_8x16d_cifar100', 'resnext20_8x16d_svhn',
'resnext20_16x4d_cifar10', 'resnext20_16x4d_cifar100', 'resnext20_16x4d_svhn',
'resnext20_16x8d_cifar10', 'resnext20_16x8d_cifar100', 'resnext20_16x8d_svhn',
'resnext20_32x2d_cifar10', 'resnext20_32x2d_cifar100', 'resnext20_32x2d_svhn',
'resnext20_32x4d_cifar10', 'resnext20_32x4d_cifar100', 'resnext20_32x4d_svhn',
'resnext20_64x1d_cifar10', 'resnext20_64x1d_cifar100', 'resnext20_64x1d_svhn',
'resnext20_64x2d_cifar10', 'resnext20_64x2d_cifar100', 'resnext20_64x2d_svhn',
'resnext29_32x4d_cifar10', 'resnext29_32x4d_cifar100', 'resnext29_32x4d_svhn',
'resnext29_16x64d_cifar10', 'resnext29_16x64d_cifar100', 'resnext29_16x64d_svhn',
'resnext56_1x64d_cifar10', 'resnext56_1x64d_cifar100', 'resnext56_1x64d_svhn',
'resnext56_2x32d_cifar10', 'resnext56_2x32d_cifar100', 'resnext56_2x32d_svhn',
'resnext56_4x16d_cifar10', 'resnext56_4x16d_cifar100', 'resnext56_4x16d_svhn',
'resnext56_8x8d_cifar10', 'resnext56_8x8d_cifar100', 'resnext56_8x8d_svhn',
'resnext56_16x4d_cifar10', 'resnext56_16x4d_cifar100', 'resnext56_16x4d_svhn',
'resnext56_32x2d_cifar10', 'resnext56_32x2d_cifar100', 'resnext56_32x2d_svhn',
'resnext56_64x1d_cifar10', 'resnext56_64x1d_cifar100', 'resnext56_64x1d_svhn',
'resnext272_1x64d_cifar10', 'resnext272_1x64d_cifar100', 'resnext272_1x64d_svhn',
'resnext272_2x32d_cifar10', 'resnext272_2x32d_cifar100', 'resnext272_2x32d_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .resnext import ResNeXtUnit
class CIFARResNeXt(HybridBlock):
    """
    ResNeXt model for CIFAR from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARResNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        with self.name_scope():
            # Backbone: 3x3 stem conv followed by the ResNeXt stages.
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for stage_id, stage_channels in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(stage_id + 1))
                with stage.name_scope():
                    for unit_id, out_channels in enumerate(stage_channels):
                        # Downsample at the first unit of every stage except
                        # the first one.
                        strides = 2 if (unit_id == 0) and (stage_id != 0) else 1
                        stage.add(ResNeXtUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            cardinality=cardinality,
                            bottleneck_width=bottleneck_width,
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))

            # Classifier head.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        return self.output(self.features(x))
def get_resnext_cifar(classes,
                      blocks,
                      cardinality,
                      bottleneck_width,
                      model_name=None,
                      pretrained=False,
                      ctx=cpu(),
                      root=os.path.join("~", ".mxnet", "models"),
                      **kwargs):
    """
    Create a ResNeXt model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Depth must follow the 9n+2 scheme: three stages of n bottleneck units each.
    assert (blocks - 2) % 9 == 0
    units_per_stage = (blocks - 2) // 9
    stage_widths = [256, 512, 1024]
    channels = [[width] * units_per_stage for width in stage_widths]

    net = CIFARResNeXt(
        channels=channels,
        init_block_channels=64,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        classes=classes,
        **kwargs)

    if pretrained:
        # None and "" are both rejected (both are falsy).
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def resnext20_1x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (1x64d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext20_1x64d_cifar10",
        **kwargs)
def resnext20_1x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (1x64d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext20_1x64d_cifar100",
        **kwargs)
def resnext20_1x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (1x64d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext20_1x64d_svhn",
        **kwargs)
def resnext20_2x32d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (2x32d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext20_2x32d_cifar10",
        **kwargs)
def resnext20_2x32d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (2x32d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext20_2x32d_cifar100",
        **kwargs)
def resnext20_2x32d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (2x32d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext20_2x32d_svhn",
        **kwargs)
def resnext20_2x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (2x64d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=64,
        model_name="resnext20_2x64d_cifar10",
        **kwargs)
def resnext20_2x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (2x64d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=64,
        model_name="resnext20_2x64d_cifar100",
        **kwargs)
def resnext20_2x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (2x64d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=2,
        bottleneck_width=64,
        model_name="resnext20_2x64d_svhn",
        **kwargs)
def resnext20_4x16d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (4x16d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext20_4x16d_cifar10",
        **kwargs)
def resnext20_4x16d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (4x16d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext20_4x16d_cifar100",
        **kwargs)
def resnext20_4x16d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (4x16d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext20_4x16d_svhn",
        **kwargs)
def resnext20_4x32d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (4x32d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=32,
        model_name="resnext20_4x32d_cifar10",
        **kwargs)
def resnext20_4x32d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (4x32d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=32,
        model_name="resnext20_4x32d_cifar100",
        **kwargs)
def resnext20_4x32d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (4x32d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=4,
        bottleneck_width=32,
        model_name="resnext20_4x32d_svhn",
        **kwargs)
def resnext20_8x8d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (8x8d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext20_8x8d_cifar10",
        **kwargs)
def resnext20_8x8d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (8x8d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext20_8x8d_cifar100",
        **kwargs)
def resnext20_8x8d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (8x8d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext20_8x8d_svhn",
        **kwargs)
def resnext20_8x16d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (8x16d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=16,
        model_name="resnext20_8x16d_cifar10",
        **kwargs)
def resnext20_8x16d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (8x16d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=16,
        model_name="resnext20_8x16d_cifar100",
        **kwargs)
def resnext20_8x16d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (8x16d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=8,
        bottleneck_width=16,
        model_name="resnext20_8x16d_svhn",
        **kwargs)
def resnext20_16x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (16x4d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext20_16x4d_cifar10",
        **kwargs)
def resnext20_16x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (16x4d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext20_16x4d_cifar100",
        **kwargs)
def resnext20_16x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (16x4d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext20_16x4d_svhn",
        **kwargs)
def resnext20_16x8d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (16x8d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=8,
        model_name="resnext20_16x8d_cifar10",
        **kwargs)
def resnext20_16x8d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (16x8d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=8,
        model_name="resnext20_16x8d_cifar100",
        **kwargs)
def resnext20_16x8d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (16x8d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=16,
        bottleneck_width=8,
        model_name="resnext20_16x8d_svhn",
        **kwargs)
def resnext20_32x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (32x2d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext20_32x2d_cifar10",
        **kwargs)
def resnext20_32x2d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (32x2d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext20_32x2d_cifar100",
        **kwargs)
def resnext20_32x2d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (32x2d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext20_32x2d_svhn",
        **kwargs)
def resnext20_32x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (32x4d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext20_32x4d_cifar10",
        **kwargs)
def resnext20_32x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (32x4d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext20_32x4d_cifar100",
        **kwargs)
def resnext20_32x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (32x4d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext20_32x4d_svhn",
        **kwargs)
def resnext20_64x1d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (64x1d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext20_64x1d_cifar10",
        **kwargs)
def resnext20_64x1d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (64x1d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext20_64x1d_cifar100",
        **kwargs)
def resnext20_64x1d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (64x1d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext20_64x1d_svhn",
        **kwargs)
def resnext20_64x2d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-20 (64x2d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=2,
        model_name="resnext20_64x2d_cifar10",
        **kwargs)
def resnext20_64x2d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-20 (64x2d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=2,
        model_name="resnext20_64x2d_cifar100",
        **kwargs)
def resnext20_64x2d_svhn(classes=10, **kwargs):
    """
    ResNeXt-20 (64x2d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=20,
        cardinality=64,
        bottleneck_width=2,
        model_name="resnext20_64x2d_svhn",
        **kwargs)
def resnext29_32x4d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-29 (32x4d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext29_32x4d_cifar10",
        **kwargs)
def resnext29_32x4d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-29 (32x4d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext29_32x4d_cifar100",
        **kwargs)
def resnext29_32x4d_svhn(classes=10, **kwargs):
    """
    ResNeXt-29 (32x4d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext29_32x4d_svhn",
        **kwargs)
def resnext29_16x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-29 (16x64d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=16,
        bottleneck_width=64,
        model_name="resnext29_16x64d_cifar10",
        **kwargs)
def resnext29_16x64d_cifar100(classes=100, **kwargs):
    """
    ResNeXt-29 (16x64d) model for CIFAR-100.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=16,
        bottleneck_width=64,
        model_name="resnext29_16x64d_cifar100",
        **kwargs)
def resnext29_16x64d_svhn(classes=10, **kwargs):
    """
    ResNeXt-29 (16x64d) model for SVHN.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=29,
        cardinality=16,
        bottleneck_width=64,
        model_name="resnext29_16x64d_svhn",
        **kwargs)
def resnext56_1x64d_cifar10(classes=10, **kwargs):
    """
    ResNeXt-56 (1x64d) model for CIFAR-10.
    Paper: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights.
    ctx : Context, default CPU
        Context for loading the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext56_1x64d_cifar10",
        **kwargs)
def resnext56_1x64d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (1x64d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext56_1x64d_cifar100",
        **kwargs)
def resnext56_1x64d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (1x64d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext56_1x64d_svhn",
        **kwargs)
def resnext56_2x32d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (2x32d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext56_2x32d_cifar10",
        **kwargs)
def resnext56_2x32d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (2x32d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext56_2x32d_cifar100",
        **kwargs)
def resnext56_2x32d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (2x32d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext56_2x32d_svhn",
        **kwargs)
def resnext56_4x16d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (4x16d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext56_4x16d_cifar10",
        **kwargs)
def resnext56_4x16d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (4x16d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext56_4x16d_cifar100",
        **kwargs)
def resnext56_4x16d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (4x16d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=4,
        bottleneck_width=16,
        model_name="resnext56_4x16d_svhn",
        **kwargs)
def resnext56_8x8d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (8x8d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext56_8x8d_cifar10",
        **kwargs)
def resnext56_8x8d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (8x8d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext56_8x8d_cifar100",
        **kwargs)
def resnext56_8x8d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (8x8d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=8,
        bottleneck_width=8,
        model_name="resnext56_8x8d_svhn",
        **kwargs)
def resnext56_16x4d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (16x4d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext56_16x4d_cifar10",
        **kwargs)
def resnext56_16x4d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (16x4d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext56_16x4d_cifar100",
        **kwargs)
def resnext56_16x4d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (16x4d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=16,
        bottleneck_width=4,
        model_name="resnext56_16x4d_svhn",
        **kwargs)
def resnext56_32x2d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (32x2d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext56_32x2d_cifar10",
        **kwargs)
def resnext56_32x2d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (32x2d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext56_32x2d_cifar100",
        **kwargs)
def resnext56_32x2d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (32x2d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext56_32x2d_svhn",
        **kwargs)
def resnext56_64x1d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (64x1d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext56_64x1d_cifar10",
        **kwargs)
def resnext56_64x1d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-56 (64x1d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext56_64x1d_cifar100",
        **kwargs)
def resnext56_64x1d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-56 (64x1d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=56,
        cardinality=64,
        bottleneck_width=1,
        model_name="resnext56_64x1d_svhn",
        **kwargs)
def resnext272_1x64d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-272 (1x64d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext272_1x64d_cifar10",
        **kwargs)
def resnext272_1x64d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-272 (1x64d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext272_1x64d_cifar100",
        **kwargs)
def resnext272_1x64d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-272 (1x64d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=1,
        bottleneck_width=64,
        model_name="resnext272_1x64d_svhn",
        **kwargs)
def resnext272_2x32d_cifar10(classes=10, **kwargs):
    """
    Construct the ResNeXt-272 (2x32d) network configured for CIFAR-10.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext272_2x32d_cifar10",
        **kwargs)
def resnext272_2x32d_cifar100(classes=100, **kwargs):
    """
    Construct the ResNeXt-272 (2x32d) network configured for CIFAR-100.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext272_2x32d_cifar100",
        **kwargs)
def resnext272_2x32d_svhn(classes=10, **kwargs):
    """
    Construct the ResNeXt-272 (2x32d) network configured for SVHN.

    Reference: 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_resnext_cifar(
        classes=classes,
        blocks=272,
        cardinality=2,
        bottleneck_width=32,
        model_name="resnext272_2x32d_svhn",
        **kwargs)
def _test():
    """
    Smoke-test every exported constructor: build the net, count trainable
    parameters against known-good values, and run a dummy forward pass.
    """
    import numpy as np
    import mxnet as mx
    pretrained = False
    # (constructor, number of classes) pairs for every model to check.
    models = [
        (resnext20_1x64d_cifar10, 10),
        (resnext20_1x64d_cifar100, 100),
        (resnext20_1x64d_svhn, 10),
        (resnext20_2x32d_cifar10, 10),
        (resnext20_2x32d_cifar100, 100),
        (resnext20_2x32d_svhn, 10),
        (resnext20_2x64d_cifar10, 10),
        (resnext20_2x64d_cifar100, 100),
        (resnext20_2x64d_svhn, 10),
        (resnext20_4x16d_cifar10, 10),
        (resnext20_4x16d_cifar100, 100),
        (resnext20_4x16d_svhn, 10),
        (resnext20_4x32d_cifar10, 10),
        (resnext20_4x32d_cifar100, 100),
        (resnext20_4x32d_svhn, 10),
        (resnext20_8x8d_cifar10, 10),
        (resnext20_8x8d_cifar100, 100),
        (resnext20_8x8d_svhn, 10),
        (resnext20_8x16d_cifar10, 10),
        (resnext20_8x16d_cifar100, 100),
        (resnext20_8x16d_svhn, 10),
        (resnext20_16x4d_cifar10, 10),
        (resnext20_16x4d_cifar100, 100),
        (resnext20_16x4d_svhn, 10),
        (resnext20_16x8d_cifar10, 10),
        (resnext20_16x8d_cifar100, 100),
        (resnext20_16x8d_svhn, 10),
        (resnext20_32x2d_cifar10, 10),
        (resnext20_32x2d_cifar100, 100),
        (resnext20_32x2d_svhn, 10),
        (resnext20_32x4d_cifar10, 10),
        (resnext20_32x4d_cifar100, 100),
        (resnext20_32x4d_svhn, 10),
        (resnext20_64x1d_cifar10, 10),
        (resnext20_64x1d_cifar100, 100),
        (resnext20_64x1d_svhn, 10),
        (resnext20_64x2d_cifar10, 10),
        (resnext20_64x2d_cifar100, 100),
        (resnext20_64x2d_svhn, 10),
        (resnext29_32x4d_cifar10, 10),
        (resnext29_32x4d_cifar100, 100),
        (resnext29_32x4d_svhn, 10),
        (resnext29_16x64d_cifar10, 10),
        (resnext29_16x64d_cifar100, 100),
        (resnext29_16x64d_svhn, 10),
        (resnext56_1x64d_cifar10, 10),
        (resnext56_1x64d_cifar100, 100),
        (resnext56_1x64d_svhn, 10),
        (resnext56_2x32d_cifar10, 10),
        (resnext56_2x32d_cifar100, 100),
        (resnext56_2x32d_svhn, 10),
        (resnext56_4x16d_cifar10, 10),
        (resnext56_4x16d_cifar100, 100),
        (resnext56_4x16d_svhn, 10),
        (resnext56_8x8d_cifar10, 10),
        (resnext56_8x8d_cifar100, 100),
        (resnext56_8x8d_svhn, 10),
        (resnext56_16x4d_cifar10, 10),
        (resnext56_16x4d_cifar100, 100),
        (resnext56_16x4d_svhn, 10),
        (resnext56_32x2d_cifar10, 10),
        (resnext56_32x2d_cifar100, 100),
        (resnext56_32x2d_svhn, 10),
        (resnext56_64x1d_cifar10, 10),
        (resnext56_64x1d_cifar100, 100),
        (resnext56_64x1d_svhn, 10),
        (resnext272_1x64d_cifar10, 10),
        (resnext272_1x64d_cifar100, 100),
        (resnext272_1x64d_svhn, 10),
        (resnext272_2x32d_cifar10, 10),
        (resnext272_2x32d_cifar100, 100),
        (resnext272_2x32d_svhn, 10),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Count trainable parameters only; parameters with no shape or marked
        # non-differentiable are skipped.
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        # Known-good parameter counts per constructor (one assert per model;
        # the `model != X or ...` form only checks the current model).
        assert (model != resnext20_1x64d_cifar10 or weight_count == 3446602)
        assert (model != resnext20_1x64d_cifar100 or weight_count == 3538852)
        assert (model != resnext20_1x64d_svhn or weight_count == 3446602)
        assert (model != resnext20_2x32d_cifar10 or weight_count == 2672458)
        assert (model != resnext20_2x32d_cifar100 or weight_count == 2764708)
        assert (model != resnext20_2x32d_svhn or weight_count == 2672458)
        assert (model != resnext20_2x64d_cifar10 or weight_count == 6198602)
        assert (model != resnext20_2x64d_cifar100 or weight_count == 6290852)
        assert (model != resnext20_2x64d_svhn or weight_count == 6198602)
        assert (model != resnext20_4x16d_cifar10 or weight_count == 2285386)
        assert (model != resnext20_4x16d_cifar100 or weight_count == 2377636)
        assert (model != resnext20_4x16d_svhn or weight_count == 2285386)
        assert (model != resnext20_4x32d_cifar10 or weight_count == 4650314)
        assert (model != resnext20_4x32d_cifar100 or weight_count == 4742564)
        assert (model != resnext20_4x32d_svhn or weight_count == 4650314)
        assert (model != resnext20_8x8d_cifar10 or weight_count == 2091850)
        assert (model != resnext20_8x8d_cifar100 or weight_count == 2184100)
        assert (model != resnext20_8x8d_svhn or weight_count == 2091850)
        assert (model != resnext20_8x16d_cifar10 or weight_count == 3876170)
        assert (model != resnext20_8x16d_cifar100 or weight_count == 3968420)
        assert (model != resnext20_8x16d_svhn or weight_count == 3876170)
        assert (model != resnext20_16x4d_cifar10 or weight_count == 1995082)
        assert (model != resnext20_16x4d_cifar100 or weight_count == 2087332)
        assert (model != resnext20_16x4d_svhn or weight_count == 1995082)
        assert (model != resnext20_16x8d_cifar10 or weight_count == 3489098)
        assert (model != resnext20_16x8d_cifar100 or weight_count == 3581348)
        assert (model != resnext20_16x8d_svhn or weight_count == 3489098)
        assert (model != resnext20_32x2d_cifar10 or weight_count == 1946698)
        assert (model != resnext20_32x2d_cifar100 or weight_count == 2038948)
        assert (model != resnext20_32x2d_svhn or weight_count == 1946698)
        assert (model != resnext20_32x4d_cifar10 or weight_count == 3295562)
        assert (model != resnext20_32x4d_cifar100 or weight_count == 3387812)
        assert (model != resnext20_32x4d_svhn or weight_count == 3295562)
        assert (model != resnext20_64x1d_cifar10 or weight_count == 1922506)
        assert (model != resnext20_64x1d_cifar100 or weight_count == 2014756)
        assert (model != resnext20_64x1d_svhn or weight_count == 1922506)
        assert (model != resnext20_64x2d_cifar10 or weight_count == 3198794)
        assert (model != resnext20_64x2d_cifar100 or weight_count == 3291044)
        assert (model != resnext20_64x2d_svhn or weight_count == 3198794)
        assert (model != resnext29_32x4d_cifar10 or weight_count == 4775754)
        assert (model != resnext29_32x4d_cifar100 or weight_count == 4868004)
        assert (model != resnext29_32x4d_svhn or weight_count == 4775754)
        assert (model != resnext29_16x64d_cifar10 or weight_count == 68155210)
        assert (model != resnext29_16x64d_cifar100 or weight_count == 68247460)
        assert (model != resnext29_16x64d_svhn or weight_count == 68155210)
        assert (model != resnext56_1x64d_cifar10 or weight_count == 9317194)
        assert (model != resnext56_1x64d_cifar100 or weight_count == 9409444)
        assert (model != resnext56_1x64d_svhn or weight_count == 9317194)
        assert (model != resnext56_2x32d_cifar10 or weight_count == 6994762)
        assert (model != resnext56_2x32d_cifar100 or weight_count == 7087012)
        assert (model != resnext56_2x32d_svhn or weight_count == 6994762)
        assert (model != resnext56_4x16d_cifar10 or weight_count == 5833546)
        assert (model != resnext56_4x16d_cifar100 or weight_count == 5925796)
        assert (model != resnext56_4x16d_svhn or weight_count == 5833546)
        assert (model != resnext56_8x8d_cifar10 or weight_count == 5252938)
        assert (model != resnext56_8x8d_cifar100 or weight_count == 5345188)
        assert (model != resnext56_8x8d_svhn or weight_count == 5252938)
        assert (model != resnext56_16x4d_cifar10 or weight_count == 4962634)
        assert (model != resnext56_16x4d_cifar100 or weight_count == 5054884)
        assert (model != resnext56_16x4d_svhn or weight_count == 4962634)
        assert (model != resnext56_32x2d_cifar10 or weight_count == 4817482)
        assert (model != resnext56_32x2d_cifar100 or weight_count == 4909732)
        assert (model != resnext56_32x2d_svhn or weight_count == 4817482)
        assert (model != resnext56_64x1d_cifar10 or weight_count == 4744906)
        assert (model != resnext56_64x1d_cifar100 or weight_count == 4837156)
        assert (model != resnext56_64x1d_svhn or weight_count == 4744906)
        assert (model != resnext272_1x64d_cifar10 or weight_count == 44540746)
        assert (model != resnext272_1x64d_cifar100 or weight_count == 44632996)
        assert (model != resnext272_1x64d_svhn or weight_count == 44540746)
        assert (model != resnext272_2x32d_cifar10 or weight_count == 32928586)
        assert (model != resnext272_2x32d_cifar100 or weight_count == 33020836)
        assert (model != resnext272_2x32d_svhn or weight_count == 32928586)
        # Dummy forward pass on a CIFAR/SVHN-sized input.
        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 71,735 | 39.075978 | 116 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/densenet_cifar.py | """
DenseNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['CIFARDenseNet', 'densenet40_k12_cifar10', 'densenet40_k12_cifar100', 'densenet40_k12_svhn',
'densenet40_k12_bc_cifar10', 'densenet40_k12_bc_cifar100', 'densenet40_k12_bc_svhn',
'densenet40_k24_bc_cifar10', 'densenet40_k24_bc_cifar100', 'densenet40_k24_bc_svhn',
'densenet40_k36_bc_cifar10', 'densenet40_k36_bc_cifar100', 'densenet40_k36_bc_svhn',
'densenet100_k12_cifar10', 'densenet100_k12_cifar100', 'densenet100_k12_svhn',
'densenet100_k24_cifar10', 'densenet100_k24_cifar100', 'densenet100_k24_svhn',
'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar100', 'densenet100_k12_bc_svhn',
'densenet190_k40_bc_cifar10', 'densenet190_k40_bc_cifar100', 'densenet190_k40_bc_svhn',
'densenet250_k24_bc_cifar10', 'densenet250_k24_bc_cifar100', 'densenet250_k24_bc_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3, pre_conv3x3_block
from .preresnet import PreResActivation
from .densenet import DenseUnit, TransitionBlock
class DenseSimpleUnit(HybridBlock):
    """
    Simple (non-bottleneck) DenseNet unit for CIFAR: one pre-activated 3x3
    convolution whose output is concatenated with the unit's input.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Parameter of Dropout layer. Faction of the input units to drop.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 dropout_rate,
                 **kwargs):
        super(DenseSimpleUnit, self).__init__(**kwargs)
        # The convolution only produces the growth (incremental) channels;
        # the remaining channels come from concatenating the input.
        growth_channels = out_channels - in_channels
        self.use_dropout = dropout_rate != 0.0
        with self.name_scope():
            self.conv = pre_conv3x3_block(
                in_channels=in_channels,
                out_channels=growth_channels,
                bn_use_global_stats=bn_use_global_stats)
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)

    def hybrid_forward(self, F, x):
        """Concatenate the input with the freshly computed feature maps."""
        new_features = self.conv(x)
        if self.use_dropout:
            new_features = self.dropout(new_features)
        return F.concat(x, new_features, dim=1)
class CIFARDenseNet(HybridBlock):
    """
    DenseNet model for CIFAR from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Faction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARDenseNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Bottleneck units build DenseNet-BC; plain 3x3 units build DenseNet.
        unit_class = DenseUnit if bottleneck else DenseSimpleUnit
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Initial block: a bare 3x3 convolution (no pooling, CIFAR-sized input).
            self.features.add(conv3x3(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    # Every stage except the first starts with a transition
                    # block that halves the channel count (and spatial size).
                    if i != 0:
                        stage.add(TransitionBlock(
                            in_channels=in_channels,
                            out_channels=(in_channels // 2),
                            bn_use_global_stats=bn_use_global_stats))
                        in_channels = in_channels // 2
                    for j, out_channels in enumerate(channels_per_stage):
                        stage.add(unit_class(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            bn_use_global_stats=bn_use_global_stats,
                            dropout_rate=dropout_rate))
                        in_channels = out_channels
                self.features.add(stage)
            # Final pre-activation (BN + ReLU) before global pooling.
            self.features.add(PreResActivation(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        """Run the feature extractor followed by the classifier head."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_densenet_cifar(classes,
                       blocks,
                       growth_rate,
                       bottleneck,
                       model_name=None,
                       pretrained=False,
                       ctx=cpu(),
                       root=os.path.join("~", ".mxnet", "models"),
                       **kwargs):
    """
    Create DenseNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    growth_rate : int
        Growth rate.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Three dense stages. A bottleneck (BC) unit spends two convolutions per
    # layer, a simple unit only one, hence the different divisors; the '- 4'
    # accounts for the stem convolution and the non-unit layers.
    if bottleneck:
        assert ((blocks - 4) % 6 == 0)
        layers = [(blocks - 4) // 6] * 3
    else:
        assert ((blocks - 4) % 3 == 0)
        layers = [(blocks - 4) // 3] * 3
    init_block_channels = 2 * growth_rate

    # Per-unit output widths for every stage, written as an explicit loop
    # (equivalent to the former nested functools.reduce expression, which was
    # hard to read). Each stage starts from half the previous stage's final
    # width -- the transition block halves it; the seed is doubled so the
    # first stage effectively starts from init_block_channels unchanged --
    # and every dense unit adds `growth_rate` channels.
    channels = []
    prev_channels = 2 * init_block_channels
    for num_units in layers:
        base_channels = prev_channels // 2
        stage_channels = [base_channels + growth_rate * (k + 1) for k in range(num_units)]
        channels.append(stage_channels)
        prev_channels = stage_channels[-1]

    net = CIFARDenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        classes=classes,
        bottleneck=bottleneck,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def densenet40_k12_cifar10(classes=10, **kwargs):
    """
    Construct the DenseNet-40 (k=12) network configured for CIFAR-10.

    Reference: 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet40_k12_cifar10",
        **kwargs)
def densenet40_k12_cifar100(classes=100, **kwargs):
    """
    Construct the DenseNet-40 (k=12) network configured for CIFAR-100.

    Reference: 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_densenet_cifar(
        classes=classes,
        blocks=40,
        growth_rate=12,
        bottleneck=False,
        model_name="densenet40_k12_cifar100",
        **kwargs)
def densenet40_k12_svhn(classes=10, **kwargs):
    """
    DenseNet-40 (k=12) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k12_svhn",
                              blocks=40, growth_rate=12, bottleneck=False, **kwargs)
def densenet40_k12_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=12) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k12_bc_cifar10",
                              blocks=40, growth_rate=12, bottleneck=True, **kwargs)
def densenet40_k12_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-40 (k=12) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k12_bc_cifar100",
                              blocks=40, growth_rate=12, bottleneck=True, **kwargs)
def densenet40_k12_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=12) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k12_bc_svhn",
                              blocks=40, growth_rate=12, bottleneck=True, **kwargs)
def densenet40_k24_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=24) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k24_bc_cifar10",
                              blocks=40, growth_rate=24, bottleneck=True, **kwargs)
def densenet40_k24_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-40 (k=24) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k24_bc_cifar100",
                              blocks=40, growth_rate=24, bottleneck=True, **kwargs)
def densenet40_k24_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=24) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k24_bc_svhn",
                              blocks=40, growth_rate=24, bottleneck=True, **kwargs)
def densenet40_k36_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=36) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k36_bc_cifar10",
                              blocks=40, growth_rate=36, bottleneck=True, **kwargs)
def densenet40_k36_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-40 (k=36) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k36_bc_cifar100",
                              blocks=40, growth_rate=36, bottleneck=True, **kwargs)
def densenet40_k36_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-40 (k=36) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet40_k36_bc_svhn",
                              blocks=40, growth_rate=36, bottleneck=True, **kwargs)
def densenet100_k12_cifar10(classes=10, **kwargs):
    """
    DenseNet-100 (k=12) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k12_cifar10",
                              blocks=100, growth_rate=12, bottleneck=False, **kwargs)
def densenet100_k12_cifar100(classes=100, **kwargs):
    """
    DenseNet-100 (k=12) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k12_cifar100",
                              blocks=100, growth_rate=12, bottleneck=False, **kwargs)
def densenet100_k12_svhn(classes=10, **kwargs):
    """
    DenseNet-100 (k=12) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k12_svhn",
                              blocks=100, growth_rate=12, bottleneck=False, **kwargs)
def densenet100_k24_cifar10(classes=10, **kwargs):
    """
    DenseNet-100 (k=24) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k24_cifar10",
                              blocks=100, growth_rate=24, bottleneck=False, **kwargs)
def densenet100_k24_cifar100(classes=100, **kwargs):
    """
    DenseNet-100 (k=24) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k24_cifar100",
                              blocks=100, growth_rate=24, bottleneck=False, **kwargs)
def densenet100_k24_svhn(classes=10, **kwargs):
    """
    DenseNet-100 (k=24) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k24_svhn",
                              blocks=100, growth_rate=24, bottleneck=False, **kwargs)
def densenet100_k12_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-100 (k=12) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k12_bc_cifar10",
                              blocks=100, growth_rate=12, bottleneck=True, **kwargs)
def densenet100_k12_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-100 (k=12) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k12_bc_cifar100",
                              blocks=100, growth_rate=12, bottleneck=True, **kwargs)
def densenet100_k12_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-100 (k=12) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet100_k12_bc_svhn",
                              blocks=100, growth_rate=12, bottleneck=True, **kwargs)
def densenet190_k40_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-190 (k=40) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet190_k40_bc_cifar10",
                              blocks=190, growth_rate=40, bottleneck=True, **kwargs)
def densenet190_k40_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-190 (k=40) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet190_k40_bc_cifar100",
                              blocks=190, growth_rate=40, bottleneck=True, **kwargs)
def densenet190_k40_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-190 (k=40) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet190_k40_bc_svhn",
                              blocks=190, growth_rate=40, bottleneck=True, **kwargs)
def densenet250_k24_bc_cifar10(classes=10, **kwargs):
    """
    DenseNet-BC-250 (k=24) model for the CIFAR-10 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet250_k24_bc_cifar10",
                              blocks=250, growth_rate=24, bottleneck=True, **kwargs)
def densenet250_k24_bc_cifar100(classes=100, **kwargs):
    """
    DenseNet-BC-250 (k=24) model for the CIFAR-100 dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet250_k24_bc_cifar100",
                              blocks=250, growth_rate=24, bottleneck=True, **kwargs)
def densenet250_k24_bc_svhn(classes=10, **kwargs):
    """
    DenseNet-BC-250 (k=24) model for the SVHN dataset, from 'Densely Connected Convolutional
    Networks,' https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    return get_densenet_cifar(classes=classes, model_name="densenet250_k24_bc_svhn",
                              blocks=250, growth_rate=24, bottleneck=True, **kwargs)
def _test():
    """Smoke test: build every model variant, check its trainable-parameter count, and run a
    dummy forward pass on a CIFAR/SVHN-sized input."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes, expected trainable parameter count)
    model_specs = [
        (densenet40_k12_cifar10, 10, 599050),
        (densenet40_k12_cifar100, 100, 622360),
        (densenet40_k12_svhn, 10, 599050),
        (densenet40_k12_bc_cifar10, 10, 176122),
        (densenet40_k12_bc_cifar100, 100, 188092),
        (densenet40_k12_bc_svhn, 10, 176122),
        (densenet40_k24_bc_cifar10, 10, 690346),
        (densenet40_k24_bc_cifar100, 100, 714196),
        (densenet40_k24_bc_svhn, 10, 690346),
        (densenet40_k36_bc_cifar10, 10, 1542682),
        (densenet40_k36_bc_cifar100, 100, 1578412),
        (densenet40_k36_bc_svhn, 10, 1542682),
        (densenet100_k12_cifar10, 10, 4068490),
        (densenet100_k12_cifar100, 100, 4129600),
        (densenet100_k12_svhn, 10, 4068490),
        (densenet100_k24_cifar10, 10, 16114138),
        (densenet100_k24_cifar100, 100, 16236268),
        (densenet100_k24_svhn, 10, 16114138),
        (densenet100_k12_bc_cifar10, 10, 769162),
        (densenet100_k12_bc_cifar100, 100, 800032),
        (densenet100_k12_bc_svhn, 10, 769162),
        (densenet190_k40_bc_cifar10, 10, 25624430),
        (densenet190_k40_bc_cifar100, 100, 25821620),
        (densenet190_k40_bc_svhn, 10, 25624430),
        (densenet250_k24_bc_cifar10, 10, 15324406),
        (densenet250_k24_bc_cifar100, 100, 15480556),
        (densenet250_k24_bc_svhn, 10, 15324406),
    ]

    for model, classes, expected_count in model_specs:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count only trainable parameters with known shapes.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        # One dummy forward pass on a 32x32 RGB input.
        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 32,370 | 37.354265 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/bninception.py | """
BN-Inception for ImageNet-1K, implemented in Gluon.
Original paper: 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift,'
https://arxiv.org/abs/1502.03167.
"""
__all__ = ['BNInception', 'bninception']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1_block, conv3x3_block, conv7x7_block
class Inception3x3Branch(HybridBlock):
    """
    BN-Inception 3x3 branch block: a 1x1 "reduce" convolution followed by a 3x3 convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 strides=1,
                 use_bias=True,
                 use_bn=True,
                 bn_use_global_stats=False,
                 **kwargs):
        super(Inception3x3Branch, self).__init__(**kwargs)
        with self.name_scope():
            # 1x1 convolution that maps in_channels -> mid_channels before the 3x3.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)
            # 3x3 convolution; carries the branch stride (2 when used inside a reduction block).
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Sequential: 1x1 reduce, then 3x3 convolution.
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class InceptionDouble3x3Branch(HybridBlock):
    """
    BN-Inception double 3x3 branch block: a 1x1 "reduce" convolution followed by two stacked 3x3
    convolutions (5x5 effective receptive field).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the second convolution.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 strides=1,
                 use_bias=True,
                 use_bn=True,
                 bn_use_global_stats=False,
                 **kwargs):
        super(InceptionDouble3x3Branch, self).__init__(**kwargs)
        with self.name_scope():
            # 1x1 convolution that maps in_channels -> mid_channels.
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)
            # First 3x3 convolution (always stride 1).
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)
            # Second 3x3 convolution; carries the branch stride (2 inside a reduction block).
            self.conv3 = conv3x3_block(
                in_channels=out_channels,
                out_channels=out_channels,
                strides=strides,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Sequential: 1x1 reduce, then two 3x3 convolutions.
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class InceptionPoolBranch(HybridBlock):
    """
    BN-Inception pooling branch block: 3x3 stride-1 pooling (average or max) followed by a 1x1
    projection convolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    avg_pool : bool
        Whether use average pooling or max pooling.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 avg_pool,
                 use_bias,
                 use_bn,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionPoolBranch, self).__init__(**kwargs)
        with self.name_scope():
            if avg_pool:
                # Stride-1, padding-1 pooling keeps the spatial size unchanged.
                self.pool = nn.AvgPool2D(
                    pool_size=3,
                    strides=1,
                    padding=1,
                    ceil_mode=True,
                    count_include_pad=True)
            else:
                self.pool = nn.MaxPool2D(
                    pool_size=3,
                    strides=1,
                    padding=1,
                    ceil_mode=True)
            # 1x1 projection after the pooling.
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Pool, then project channels with the 1x1 convolution.
        x = self.pool(x)
        x = self.conv(x)
        return x
class StemBlock(HybridBlock):
    """
    BN-Inception stem block: 7x7/2 convolution, max pooling, a (1x1, 3x3) convolution pair, and a
    second max pooling. Overall it downscales the input by a factor of 8.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of intermediate channels.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 use_bias,
                 use_bn,
                 bn_use_global_stats,
                 **kwargs):
        super(StemBlock, self).__init__(**kwargs)
        with self.name_scope():
            # 7x7 stride-2 convolution (first downscale).
            self.conv1 = conv7x7_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)
            # 3x3 stride-2 max pooling (second downscale); ceil_mode matches the reference model.
            self.pool1 = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True)
            # 1x1 reduce + 3x3 convolution pair.
            self.conv2 = Inception3x3Branch(
                in_channels=mid_channels,
                out_channels=out_channels,
                mid_channels=mid_channels,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats)
            # 3x3 stride-2 max pooling (third downscale).
            self.pool2 = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.pool2(x)
        return x
class InceptionBlock(HybridBlock):
    """
    BN-Inception unit: four parallel branches (1x1, 1x1+3x3, 1x1+double-3x3, pool+1x1) whose
    outputs are concatenated along the channel axis.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid1_channels_list : list of int
        Number of pre-middle channels for branches.
    mid2_channels_list : list of int
        Number of middle channels for branches.
    avg_pool : bool
        Whether use average pooling or max pooling.
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 mid1_channels_list,
                 mid2_channels_list,
                 avg_pool,
                 use_bias,
                 use_bn,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBlock, self).__init__(**kwargs)
        # Two reduce widths (branches 2 and 3) and four branch output widths.
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)
        with self.name_scope():
            # Branch outputs are concatenated along the channel axis (axis=1, NCHW).
            self.branches = HybridConcurrent(axis=1, prefix="")
            # Branch 1: plain 1x1 convolution.
            self.branches.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=mid2_channels_list[0],
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 2: 1x1 reduce + 3x3 convolution.
            self.branches.add(Inception3x3Branch(
                in_channels=in_channels,
                out_channels=mid2_channels_list[1],
                mid_channels=mid1_channels_list[0],
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 3: 1x1 reduce + two stacked 3x3 convolutions.
            self.branches.add(InceptionDouble3x3Branch(
                in_channels=in_channels,
                out_channels=mid2_channels_list[2],
                mid_channels=mid1_channels_list[1],
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 4: pooling + 1x1 projection.
            self.branches.add(InceptionPoolBranch(
                in_channels=in_channels,
                out_channels=mid2_channels_list[3],
                avg_pool=avg_pool,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))

    def hybrid_forward(self, F, x):
        # Concatenation of all branch outputs.
        x = self.branches(x)
        return x
class ReductionBlock(HybridBlock):
    """
    BN-Inception reduction block: three stride-2 branches (1x1+3x3, 1x1+double-3x3, max pool)
    concatenated along the channel axis; halves the spatial resolution.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    mid1_channels_list : list of int
        Number of pre-middle channels for branches.
    mid2_channels_list : list of int
        Number of middle channels for branches.
        Only indices 1 and 2 are used here (no 1x1 or pool-projection branch).
    use_bias : bool
        Whether the convolution layer uses a bias vector.
    use_bn : bool
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 mid1_channels_list,
                 mid2_channels_list,
                 use_bias,
                 use_bn,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionBlock, self).__init__(**kwargs)
        assert (len(mid1_channels_list) == 2)
        assert (len(mid2_channels_list) == 4)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            # Branch 1: 1x1 reduce + 3x3 convolution with stride 2.
            self.branches.add(Inception3x3Branch(
                in_channels=in_channels,
                out_channels=mid2_channels_list[1],
                mid_channels=mid1_channels_list[0],
                strides=2,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 2: 1x1 reduce + double 3x3 convolution with stride 2.
            self.branches.add(InceptionDouble3x3Branch(
                in_channels=in_channels,
                out_channels=mid2_channels_list[2],
                mid_channels=mid1_channels_list[1],
                strides=2,
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))
            # Branch 3: parameter-free stride-2 max pooling.
            self.branches.add(nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True))

    def hybrid_forward(self, F, x):
        # Concatenation of all branch outputs.
        x = self.branches(x)
        return x
class BNInception(HybridBlock):
    """
    BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate
    Shift,' https://arxiv.org/abs/1502.03167.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels_list : list of int
        Number of output channels for the initial unit.
    mid1_channels_list : list of list of list of int
        Number of pre-middle channels for each unit.
    mid2_channels_list : list of list of list of int
        Number of middle channels for each unit.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layers.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels_list,
                 mid1_channels_list,
                 mid2_channels_list,
                 use_bias=True,
                 use_bn=True,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(BNInception, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 7x7/2 conv + pool + (1x1, 3x3) convs + pool (downscale x8).
            self.features.add(StemBlock(
                in_channels=in_channels,
                out_channels=init_block_channels_list[1],
                mid_channels=init_block_channels_list[0],
                use_bias=use_bias,
                use_bn=use_bn,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels_list[-1]
            for i, channels_per_stage in enumerate(channels):
                mid1_channels_list_i = mid1_channels_list[i]
                mid2_channels_list_i = mid2_channels_list[i]
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        if (j == 0) and (i != 0):
                            # First unit of every stage after the first halves the resolution.
                            stage.add(ReductionBlock(
                                in_channels=in_channels,
                                mid1_channels_list=mid1_channels_list_i[j],
                                mid2_channels_list=mid2_channels_list_i[j],
                                use_bias=use_bias,
                                use_bn=use_bn,
                                bn_use_global_stats=bn_use_global_stats))
                        else:
                            # All units use average pooling in the pool branch except the very
                            # last unit of the last stage, which uses max pooling.
                            avg_pool = (i != len(channels) - 1) or (j != len(channels_per_stage) - 1)
                            stage.add(InceptionBlock(
                                in_channels=in_channels,
                                mid1_channels_list=mid1_channels_list_i[j],
                                mid2_channels_list=mid2_channels_list_i[j],
                                avg_pool=avg_pool,
                                use_bias=use_bias,
                                use_bn=use_bn,
                                bn_use_global_stats=bn_use_global_stats))
                        in_channels = out_channels
                self.features.add(stage)
            # Global 7x7 average pooling (feature maps are 7x7 for 224x224 inputs).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            # Classifier head: flatten + fully-connected layer.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_bninception(model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create BN-Inception model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load pretrained weights for the model.
    ctx : Context, default CPU
        Context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Directory for storing the model parameters.
    """
    # Stem channel counts: 7x7 conv output and 3x3 conv output.
    init_block_channels_list = [64, 192]
    # Per-unit output channels for the three stages.
    channels = [[256, 320], [576, 576, 576, 608, 608], [1056, 1024, 1024]]
    # Reduce (1x1) channels per unit; trailing comments name the GoogLeNet-style units.
    mid1_channels_list = [
        [[64, 64],
         [64, 64]],
        [[128, 64],  # 3c
         [64, 96],  # 4a
         [96, 96],  # 4b
         [128, 128],  # 4c
         [128, 160]],  # 4d
        [[128, 192],  # 4e
         [192, 160],  # 5a
         [192, 192]],  # 5b
    ]
    # Branch output channels per unit; zeros mark branches absent in reduction units.
    mid2_channels_list = [
        [[64, 64, 96, 32],
         [64, 96, 96, 64]],
        [[0, 160, 96, 0],  # 3c
         [224, 96, 128, 128],  # 4a
         [192, 128, 128, 128],  # 4b
         [160, 160, 160, 128],  # 4c
         [96, 192, 192, 128]],  # 4d
        [[0, 192, 256, 0],  # 4e
         [352, 320, 224, 128],  # 5a
         [352, 320, 224, 128]],  # 5b
    ]

    net = BNInception(
        channels=channels,
        init_block_channels_list=init_block_channels_list,
        mid1_channels_list=mid1_channels_list,
        mid2_channels_list=mid2_channels_list,
        **kwargs)

    if pretrained:
        # None and the empty string are both rejected.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def bninception(**kwargs):
    """
    BN-Inception model from 'Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate
    Shift,' https://arxiv.org/abs/1502.03167.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Thin wrapper: delegate to the generic constructor with the canonical name.
    return get_bninception(model_name="bninception", **kwargs)
def _test():
    """Smoke test: build the model, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        bninception,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count only trainable parameters with a fully-defined shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != bninception or weight_count == 11295240)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 19,996 | 33.008503 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/msdnet.py | """
MSDNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Multi-Scale Dense Networks for Resource Efficient Image Classification,'
https://arxiv.org/abs/1703.09844.
"""
__all__ = ['MSDNet', 'msdnet22', 'MultiOutputSequential', 'MSDFeatureBlock']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, DualPathSequential
from .resnet import ResInitBlock
class MultiOutputSequential(nn.HybridSequential):
    """
    Sequential container that chains its children as usual but returns the
    intermediate output of every child as a list.
    """
    def __init__(self, **kwargs):
        super(MultiOutputSequential, self).__init__(**kwargs)

    def hybrid_forward(self, F, x):
        collected = []
        for child in self._children.values():
            x = child(x)
            collected.append(x)
        return collected
class MultiBlockSequential(nn.HybridSequential):
    """
    Sequential container whose children are applied element-wise: the i-th
    child consumes the i-th entry of the input list ([x0] + x_rest).
    """
    def __init__(self, **kwargs):
        super(MultiBlockSequential, self).__init__(**kwargs)

    def hybrid_forward(self, F, x0, x_rest):
        inputs = [x0] + x_rest
        return [child(inp) for child, inp in zip(self._children.values(), inputs)]
class MSDBaseBlock(HybridBlock):
    """
    MSDNet base block: an optional 1x1 bottleneck followed by a 3x3 conv.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bottleneck : bool
        Whether to use a bottleneck.
    bottleneck_factor : int
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 use_bottleneck,
                 bottleneck_factor,
                 **kwargs):
        super(MSDBaseBlock, self).__init__(**kwargs)
        self.use_bottleneck = use_bottleneck
        if use_bottleneck:
            # Cap the bottleneck width so it never exceeds the input width.
            mid_channels = min(in_channels, bottleneck_factor * out_channels)
        else:
            mid_channels = in_channels
        with self.name_scope():
            if self.use_bottleneck:
                self.bn_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels)
            self.conv = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=strides)

    def hybrid_forward(self, F, x):
        if self.use_bottleneck:
            x = self.bn_conv(x)
        return self.conv(x)
class MSDFirstScaleBlock(HybridBlock):
    """
    MSDNet first scale dense block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_bottleneck : bool
        Whether to use a bottleneck.
    bottleneck_factor : int
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bottleneck,
                 bottleneck_factor,
                 **kwargs):
        super(MSDFirstScaleBlock, self).__init__(**kwargs)
        assert (out_channels > in_channels)
        # Only the channel increment is produced by the block; the input is
        # concatenated back (dense connectivity).
        growth_channels = out_channels - in_channels
        with self.name_scope():
            self.block = MSDBaseBlock(
                in_channels=in_channels,
                out_channels=growth_channels,
                strides=1,
                use_bottleneck=use_bottleneck,
                bottleneck_factor=bottleneck_factor)

    def hybrid_forward(self, F, x):
        return F.concat(x, self.block(x), dim=1)
class MSDScaleBlock(HybridBlock):
    """
    MSDNet ordinary scale dense block: fuses a strided branch from the finer
    (previous) scale with a branch from the current scale.

    Parameters:
    ----------
    in_channels_prev : int
        Number of input channels for the previous scale.
    in_channels : int
        Number of input channels for the current scale.
    out_channels : int
        Number of output channels.
    use_bottleneck : bool
        Whether to use a bottleneck.
    bottleneck_factor_prev : int
        Bottleneck factor for the previous scale.
    bottleneck_factor : int
        Bottleneck factor for the current scale.
    """
    def __init__(self,
                 in_channels_prev,
                 in_channels,
                 out_channels,
                 use_bottleneck,
                 bottleneck_factor_prev,
                 bottleneck_factor,
                 **kwargs):
        super(MSDScaleBlock, self).__init__(**kwargs)
        assert (out_channels > in_channels)
        assert (out_channels % 2 == 0)
        # Each of the two branches supplies half of the channel growth.
        half_growth = (out_channels - in_channels) // 2
        with self.name_scope():
            self.down_block = MSDBaseBlock(
                in_channels=in_channels_prev,
                out_channels=half_growth,
                strides=2,
                use_bottleneck=use_bottleneck,
                bottleneck_factor=bottleneck_factor_prev)
            self.curr_block = MSDBaseBlock(
                in_channels=in_channels,
                out_channels=half_growth,
                strides=1,
                use_bottleneck=use_bottleneck,
                bottleneck_factor=bottleneck_factor)

    def hybrid_forward(self, F, x_prev, x):
        # Dense connectivity: keep the input and append both branch outputs.
        return F.concat(x, self.down_block(x_prev), self.curr_block(x), dim=1)
class MSDInitLayer(HybridBlock):
    """
    MSDNet initial (so-called first) layer: produces one feature map per scale.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : list/tuple of int
        Number of output channels for each scale.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(MSDInitLayer, self).__init__(**kwargs)
        with self.name_scope():
            self.scale_blocks = MultiOutputSequential()
            for scale_idx, scale_channels in enumerate(out_channels):
                if scale_idx == 0:
                    # First scale: ResNet-style stem.
                    self.scale_blocks.add(ResInitBlock(
                        in_channels=in_channels,
                        out_channels=scale_channels))
                else:
                    # Every further scale halves the spatial resolution again.
                    self.scale_blocks.add(conv3x3_block(
                        in_channels=in_channels,
                        out_channels=scale_channels,
                        strides=2))
                in_channels = scale_channels

    def hybrid_forward(self, F, x):
        return self.scale_blocks(x)
class MSDLayer(HybridBlock):
    """
    MSDNet ordinary layer.

    Parameters:
    ----------
    in_channels : list/tuple of int
        Number of input channels for each input scale.
    out_channels : list/tuple of int
        Number of output channels for each output scale.
    use_bottleneck : bool
        Whether to use a bottleneck.
    bottleneck_factors : list/tuple of int
        Bottleneck factor for each input scale.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bottleneck,
                 bottleneck_factors,
                 **kwargs):
        super(MSDLayer, self).__init__(**kwargs)
        in_scales = len(in_channels)
        out_scales = len(out_channels)
        # Number of finest scales dropped by this layer; output scales are
        # aligned to the coarsest input scales via this offset.
        self.dec_scales = in_scales - out_scales
        assert (self.dec_scales >= 0)
        with self.name_scope():
            self.scale_blocks = nn.HybridSequential(prefix="")
            for i in range(out_scales):
                if (i == 0) and (self.dec_scales == 0):
                    # First kept scale with no finer neighbour: single-input dense block.
                    self.scale_blocks.add(MSDFirstScaleBlock(
                        in_channels=in_channels[self.dec_scales + i],
                        out_channels=out_channels[i],
                        use_bottleneck=use_bottleneck,
                        bottleneck_factor=bottleneck_factors[self.dec_scales + i]))
                else:
                    # Fuse the previous (finer) scale, downsampled, with the current one.
                    self.scale_blocks.add(MSDScaleBlock(
                        in_channels_prev=in_channels[self.dec_scales + i - 1],
                        in_channels=in_channels[self.dec_scales + i],
                        out_channels=out_channels[i],
                        use_bottleneck=use_bottleneck,
                        bottleneck_factor_prev=bottleneck_factors[self.dec_scales + i - 1],
                        bottleneck_factor=bottleneck_factors[self.dec_scales + i]))

    def hybrid_forward(self, F, x0, x_rest):
        # x0/x_rest split keeps the signature hybridization-friendly; rebuild the
        # full per-scale list before dispatching.
        x = [x0] + x_rest
        outs = []
        for i in range(len(self.scale_blocks)):
            if (i == 0) and (self.dec_scales == 0):
                y = self.scale_blocks[i](x[i])
            else:
                y = self.scale_blocks[i](
                    x[self.dec_scales + i - 1],
                    x[self.dec_scales + i])
            outs.append(y)
        return outs[0], outs[1:]
class MSDTransitionLayer(HybridBlock):
    """
    MSDNet transition layer: one 1x1 conv per scale that changes channel
    counts without touching resolution.

    Parameters:
    ----------
    in_channels : list/tuple of int
        Number of input channels for each scale.
    out_channels : list/tuple of int
        Number of output channels for each scale.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(MSDTransitionLayer, self).__init__(**kwargs)
        assert (len(in_channels) == len(out_channels))
        with self.name_scope():
            self.scale_blocks = MultiBlockSequential()
            for in_ch, out_ch in zip(in_channels, out_channels):
                self.scale_blocks.add(conv1x1_block(
                    in_channels=in_ch,
                    out_channels=out_ch))

    def hybrid_forward(self, F, x0, x_rest):
        outs = self.scale_blocks(x0, x_rest)
        return outs[0], outs[1:]
class MSDFeatureBlock(HybridBlock):
    """
    MSDNet feature block (stage of cascade, so-called block).

    Parameters:
    ----------
    in_channels : list of list of int
        Number of input channels for each layer and for each input scale.
    out_channels : list of list of int
        Number of output channels for each layer and for each output scale.
    use_bottleneck : bool
        Whether to use a bottleneck.
    bottleneck_factors : list of list of int
        Bottleneck factor for each layer and for each input scale.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 use_bottleneck,
                 bottleneck_factors,
                 **kwargs):
        super(MSDFeatureBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = DualPathSequential(prefix="")
            for idx, layer_channels in enumerate(out_channels):
                layer_factors = bottleneck_factors[idx]
                if layer_factors:
                    self.blocks.add(MSDLayer(
                        in_channels=in_channels,
                        out_channels=layer_channels,
                        use_bottleneck=use_bottleneck,
                        bottleneck_factors=layer_factors))
                else:
                    # An empty factor list marks a transition (1x1 reduction) layer.
                    self.blocks.add(MSDTransitionLayer(
                        in_channels=in_channels,
                        out_channels=layer_channels))
                in_channels = layer_channels

    def hybrid_forward(self, F, x0, x_rest):
        x0, x_rest = self.blocks(x0, x_rest)
        return [x0] + x_rest
class MSDClassifier(HybridBlock):
    """
    MSDNet classifier (early-exit head).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels,
                 classes,
                 **kwargs):
        super(MSDClassifier, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Two stride-2 convs (channel-preserving) shrink the map before pooling.
            for _ in range(2):
                self.features.add(conv3x3_block(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    strides=2))
            self.features.add(nn.AvgPool2D(
                pool_size=2,
                strides=2))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        return self.output(self.features(x))
class MSDNet(HybridBlock):
    """
    MSDNet model from 'Multi-Scale Dense Networks for Resource Efficient Image Classification,'
    https://arxiv.org/abs/1703.09844.

    Parameters:
    ----------
    channels : list of list of list of int
        Number of output channels for each unit.
    init_layer_channels : list of int
        Number of output channels for the initial layer.
    num_feature_blocks : int
        Number of subnets.
    use_bottleneck : bool
        Whether to use a bottleneck.
    bottleneck_factors : list of list of int
        Bottleneck factor for each layers and for each input scale.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_layer_channels,
                 num_feature_blocks,
                 use_bottleneck,
                 bottleneck_factors,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(MSDNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.init_layer = MSDInitLayer(
                in_channels=in_channels,
                out_channels=init_layer_channels)
            in_channels = init_layer_channels
            self.feature_blocks = nn.HybridSequential(prefix="")
            self.classifiers = nn.HybridSequential(prefix="")
            for block_idx in range(num_feature_blocks):
                self.feature_blocks.add(MSDFeatureBlock(
                    in_channels=in_channels,
                    out_channels=channels[block_idx],
                    use_bottleneck=use_bottleneck,
                    bottleneck_factors=bottleneck_factors[block_idx]))
                # The channel layout after a block is that of its last layer.
                in_channels = channels[block_idx][-1]
                # Each stage gets its own classifier on the last (coarsest) scale.
                self.classifiers.add(MSDClassifier(
                    in_channels=in_channels[-1],
                    classes=classes))

    def hybrid_forward(self, F, x, only_last=True):
        x = self.init_layer(x)
        outs = []
        for feature_block, classifier in zip(self.feature_blocks, self.classifiers):
            x = feature_block(x[0], x[1:])
            outs.append(classifier(x[-1]))
        return outs[-1] if only_last else outs
def get_msdnet(blocks,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create MSDNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Only the 22-layer configuration is supported.
    assert (blocks == 22)
    # Fixed architecture hyper-parameters for MSDNet-22.
    num_scales = 4
    num_feature_blocks = 10
    base = 4
    step = 2
    reduction_rate = 0.5
    growth = 6
    growth_factor = [1, 2, 4, 4]
    use_bottleneck = True
    bottleneck_factor_per_scales = [1, 2, 4, 4]
    assert (reduction_rate > 0.0)
    init_layer_channels = [64 * c for c in growth_factor[:num_scales]]
    # "even": every subnet after the first contains exactly `step` layers.
    step_mode = "even"
    layers_per_subnets = [base]
    for i in range(num_feature_blocks - 1):
        layers_per_subnets.append(step if step_mode == 'even' else step * i + 1)
    total_layers = sum(layers_per_subnets)
    # After every `interval` layers one (finest) scale is dropped.
    interval = math.ceil(total_layers / num_scales)
    global_layer_ind = 0
    channels = []
    bottleneck_factors = []
    in_channels_tmp = init_layer_channels
    in_scales = num_scales
    for i in range(num_feature_blocks):
        layers_per_subnet = layers_per_subnets[i]
        scales_i = []
        channels_i = []
        bottleneck_factors_i = []
        for j in range(layers_per_subnet):
            # Number of scales still alive at this depth.
            out_scales = int(num_scales - math.floor(global_layer_ind / interval))
            global_layer_ind += 1
            scales_i += [out_scales]
            scale_offset = num_scales - out_scales
            in_dec_scales = num_scales - len(in_channels_tmp)
            # Dense growth: each kept scale gains `growth * growth_factor` channels.
            out_channels = [in_channels_tmp[scale_offset - in_dec_scales + k] + growth * growth_factor[scale_offset + k]
                            for k in range(out_scales)]
            in_dec_scales = num_scales - len(in_channels_tmp)
            bottleneck_factors_ij = bottleneck_factor_per_scales[in_dec_scales:][:len(in_channels_tmp)]
            in_channels_tmp = out_channels
            channels_i += [out_channels]
            bottleneck_factors_i += [bottleneck_factors_ij]
            if in_scales > out_scales:
                # A scale was just dropped: append a transition layer that shrinks
                # channels by `reduction_rate` (marked by an empty factor list).
                assert (in_channels_tmp[0] % growth_factor[scale_offset] == 0)
                out_channels1 = int(math.floor(in_channels_tmp[0] / growth_factor[scale_offset] * reduction_rate))
                out_channels = [out_channels1 * growth_factor[scale_offset + k] for k in range(out_scales)]
                in_channels_tmp = out_channels
                channels_i += [out_channels]
                bottleneck_factors_i += [[]]
            in_scales = out_scales
        in_scales = scales_i[-1]
        channels += [channels_i]
        bottleneck_factors += [bottleneck_factors_i]
    net = MSDNet(
        channels=channels,
        init_layer_channels=init_layer_channels,
        num_feature_blocks=num_feature_blocks,
        use_bottleneck=use_bottleneck,
        bottleneck_factors=bottleneck_factors,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def msdnet22(**kwargs):
    """
    MSDNet-22 model from 'Multi-Scale Dense Networks for Resource Efficient Image Classification,'
    https://arxiv.org/abs/1703.09844.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Thin wrapper around the generic constructor for the 22-block configuration.
    return get_msdnet(blocks=22, model_name="msdnet22", **kwargs)
def _test():
    """Smoke test: build the model, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    models = [
        msdnet22,
    ]

    for model in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count only trainable parameters with a fully-defined shape.
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != msdnet22 or weight_count == 20106676)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 20,581 | 31.566456 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.