repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
imgclsmob | imgclsmob-master/other/chainer_/cifar1.py | """
CIFAR/SVHN dataset routines.
"""
import numpy as np
import chainer
from chainer import iterators
from chainer import Chain
from chainer.dataset import DatasetMixin
from chainer.datasets import cifar, svhn
__all__ = ['add_dataset_parser_arguments', 'get_val_data_iterator', 'get_data_iterators', 'CIFARPredictor']
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Add CIFAR/SVHN-specific command line arguments to the parser.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        Parser to populate.
    dataset_name : str
        Dataset name: CIFAR10, CIFAR100 or SVHN.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    num_classes_map = {
        "CIFAR10": 10,
        "CIFAR100": 100,
        "SVHN": 10,
    }
    if dataset_name not in num_classes_map:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    parser.add_argument(
        '--num-classes',
        type=int,
        default=num_classes_map[dataset_name],
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
class CIFARPredictor(Chain):
    """
    Inference wrapper for a CIFAR/SVHN classification model.

    Normalizes input images with per-channel mean/std (defaults are the usual
    CIFAR-10 statistics) and runs the wrapped model in test mode with no
    backprop graph.

    Parameters
    ----------
    base_model : chainer.Chain
        Model to wrap for prediction.
    mean : tuple of 3 float
        Per-channel normalization means.
    std : tuple of 3 float
        Per-channel normalization standard deviations.
    """
    def __init__(self,
                 base_model,
                 mean=(0.4914, 0.4822, 0.4465),
                 std=(0.2023, 0.1994, 0.2010)):
        super(CIFARPredictor, self).__init__()
        # Reshape stats to (C, 1, 1) so they broadcast over (C, H, W) images.
        self.mean = np.array(mean, np.float32)[:, np.newaxis, np.newaxis]
        self.std = np.array(std, np.float32)[:, np.newaxis, np.newaxis]
        with self.init_scope():
            self.model = base_model

    def _preprocess(self, img):
        # NOTE(review): normalization mutates `img` in place, so the caller's
        # array is modified as a side effect -- pass a copy if it is reused.
        img -= self.mean
        img /= self.std
        return img

    def predict(self, imgs):
        """
        Run inference on a batch of images.

        Parameters
        ----------
        imgs : iterable of arrays
            Images, presumably CHW float arrays matching the normalization
            stats -- TODO confirm against callers.

        Returns
        -------
        numpy.ndarray
            Raw model outputs, moved to CPU memory.
        """
        imgs = self.xp.asarray([self._preprocess(img) for img in imgs])
        with chainer.using_config('train', False), chainer.function.no_backprop_mode():
            imgs = chainer.Variable(imgs)
            predictions = self.model(imgs)
            output = chainer.backends.cuda.to_cpu(predictions.array)
        return output
def get_val_data_iterator(dataset_name,
                          batch_size,
                          num_workers):
    """
    Create an iterator over the test split of CIFAR-10/CIFAR-100/SVHN.

    Parameters
    ----------
    dataset_name : str
        Dataset name: CIFAR10, CIFAR100 or SVHN.
    batch_size : int
        Batch size.
    num_workers : int
        Number of loader worker processes.

    Returns
    -------
    MultiprocessIterator
        Non-repeating, unshuffled iterator over the test set.
    int
        Number of samples in the test set.
    """
    dataset_loaders = {
        "CIFAR10": cifar.get_cifar10,
        "CIFAR100": cifar.get_cifar100,
        "SVHN": svhn.get_svhn,
    }
    if dataset_name not in dataset_loaders:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    # Each loader returns (train_split, test_split); only the test split is used.
    _, val_dataset = dataset_loaders[dataset_name]()
    val_dataset_len = len(val_dataset)
    val_iterator = iterators.MultiprocessIterator(
        dataset=val_dataset,
        batch_size=batch_size,
        repeat=False,
        shuffle=False,
        n_processes=num_workers,
        shared_mem=300000000)
    return val_iterator, val_dataset_len
class PreprocessedCIFARDataset(DatasetMixin):
    """
    CIFAR-10 dataset with per-channel mean/std normalization applied on access.

    Parameters
    ----------
    train : bool
        Whether to use the training split (otherwise the test split).
    mean : tuple of 3 float
        Per-channel normalization means.
    std : tuple of 3 float
        Per-channel normalization standard deviations.
    """
    def __init__(self,
                 train,
                 mean=(0.4914, 0.4822, 0.4465),
                 std=(0.2023, 0.1994, 0.2010)):
        train_ds, test_ds = cifar.get_cifar10()
        self.base = train_ds if train else test_ds
        # Reshape stats to (C, 1, 1) so they broadcast over (C, H, W) images.
        self.mean = np.array(mean, np.float32)[:, np.newaxis, np.newaxis]
        self.std = np.array(std, np.float32)[:, np.newaxis, np.newaxis]

    def __len__(self):
        return len(self.base)

    def _preprocess(self, img):
        # NOTE(review): normalization is in-place; whether this mutates the
        # underlying dataset storage depends on what `self.base[i]` returns --
        # verify it yields a fresh array per access.
        img -= self.mean
        img /= self.std
        return img

    def get_example(self, i):
        """Return the i-th (normalized_image, label) pair."""
        image, label = self.base[i]
        image = self._preprocess(image)
        return image, label
def get_data_iterators(batch_size,
                       num_workers):
    """
    Create training and validation iterators over normalized CIFAR-10.

    Parameters
    ----------
    batch_size : int
        Batch size for both iterators.
    num_workers : int
        Number of loader worker processes.

    Returns
    -------
    MultiprocessIterator
        Repeating, shuffled iterator over the training split.
    MultiprocessIterator
        Non-repeating, unshuffled iterator over the test split.
    """
    train_dataset = PreprocessedCIFARDataset(train=True)
    train_iterator = iterators.MultiprocessIterator(
        dataset=train_dataset,
        batch_size=batch_size,
        # Bug fix: was repeat=False, which makes the iterator stop after a
        # single pass -- multi-epoch training with an epoch stop trigger
        # requires a repeating training iterator.
        repeat=True,
        shuffle=True,
        n_processes=num_workers)
    val_dataset = PreprocessedCIFARDataset(train=False)
    val_iterator = iterators.MultiprocessIterator(
        dataset=val_dataset,
        batch_size=batch_size,
        repeat=False,
        shuffle=False,
        n_processes=num_workers)
    return train_iterator, val_iterator
| 4,156 | 27.868056 | 107 | py |
imgclsmob | imgclsmob-master/other/chainer_/train_ch_cifar.py | import argparse
import numpy as np
import chainer
from chainer import cuda
from chainer import training
from chainer.training import extensions
from chainer.serializers import save_npz
from common.logger_utils import initialize_logging
from chainer_.utils import prepare_model
from chainer_.cifar1 import add_dataset_parser_arguments
from chainer_.cifar1 import get_data_iterators
def parse_args():
    """
    Parse command line arguments for Chainer/CIFAR training.

    The dataset argument is parsed first (via `parse_known_args`) so that
    dataset-specific arguments can be registered before the full parse.

    Returns
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (Chainer/CIFAR)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--dataset',
        type=str,
        default="CIFAR10",
        help='dataset name. options are CIFAR10 and CIFAR100')
    # Two-stage parsing: peek at --dataset, then add its specific arguments.
    args, _ = parser.parse_known_args()
    add_dataset_parser_arguments(parser, args.dataset)
    parser.add_argument(
        '--model',
        type=str,
        required=True,
        help='type of model to use. see model_provider for options.')
    parser.add_argument(
        '--use-pretrained',
        action='store_true',
        help='enable using pretrained model from gluon.')
    parser.add_argument(
        '--resume',
        type=str,
        default='',
        help='resume from previously saved parameters if not None')
    parser.add_argument(
        '--resume-state',
        type=str,
        default='',
        help='resume from previously saved optimizer state if not None')
    parser.add_argument(
        '--num-gpus',
        type=int,
        default=0,
        help='number of gpus to use.')
    parser.add_argument(
        '-j',
        '--num-data-workers',
        dest='num_workers',
        default=4,
        type=int,
        help='number of preprocessing workers')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=512,
        help='training batch size per device (CPU/GPU).')
    parser.add_argument(
        '--num-epochs',
        type=int,
        default=120,
        help='number of training epochs.')
    parser.add_argument(
        '--start-epoch',
        type=int,
        default=1,
        help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument(
        '--attempt',
        type=int,
        default=1,
        help='current number of training')
    parser.add_argument(
        '--optimizer-name',
        type=str,
        default='nag',
        help='optimizer name')
    parser.add_argument(
        '--lr',
        type=float,
        default=0.1,
        help='learning rate. default is 0.1')
    parser.add_argument(
        '--lr-mode',
        type=str,
        default='cosine',
        help='learning rate scheduler mode. options are step, poly and cosine')
    parser.add_argument(
        '--lr-decay',
        type=float,
        default=0.1,
        help='decay rate of learning rate. default is 0.1')
    parser.add_argument(
        '--lr-decay-period',
        type=int,
        default=0,
        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument(
        '--lr-decay-epoch',
        type=str,
        default='40,60',
        help='epoches at which learning rate decays. default is 40,60.')
    parser.add_argument(
        '--target-lr',
        type=float,
        default=1e-8,
        help='ending learning rate; default is 1e-8')
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='momentum value for optimizer; default is 0.9')
    parser.add_argument(
        '--wd',
        type=float,
        default=0.0001,
        help='weight decay rate. default is 0.0001.')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        help='number of batches to wait before logging.')
    parser.add_argument(
        '--save-interval',
        type=int,
        default=4,
        help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument(
        '--save-dir',
        type=str,
        default='',
        help='directory of saved models and log-files')
    parser.add_argument(
        '--logging-file-name',
        type=str,
        default='train.log',
        help='filename of training log')
    parser.add_argument(
        '--seed',
        type=int,
        default=-1,
        help='Random seed to be fixed')
    # NOTE(review): these defaults log mxnet packages even though this is a
    # Chainer script -- presumably copied from the gluon variant; confirm.
    parser.add_argument(
        '--log-packages',
        type=str,
        default='mxnet',
        help='list of python packages for logging')
    parser.add_argument(
        '--log-pip-packages',
        type=str,
        default='mxnet-cu92, cupy-cuda100, gluoncv',
        help='list of pip packages for logging')
    args = parser.parse_args()
    return args
def init_rand(seed):
    """
    Fix the random seed, drawing one at random when a non-positive value
    is given.

    Parameters
    ----------
    seed : int
        Seed value; if <= 0, a random seed is drawn instead.

    Returns
    -------
    int
        The seed that was actually applied.
    """
    if seed <= 0:
        seed = np.random.randint(10000)
    # Bug fix: the seed was previously returned without ever being applied,
    # so passing --seed had no effect on reproducibility.
    np.random.seed(seed=seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    lr,
                    momentum,
                    num_epochs,
                    train_iter,
                    val_iter,
                    logging_dir_path,
                    num_gpus=0):
    """
    Build a Chainer Trainer with optimizer, evaluator, snapshotting and
    reporting extensions.

    Parameters
    ----------
    net : chainer.Chain
        Model (with loss/accuracy reporting) to train.
    optimizer_name : str
        Optimizer name: 'sgd' or 'nag'.
    lr : float
        Learning rate.
    momentum : float
        Momentum value.
    num_epochs : int
        Number of epochs (stop trigger).
    train_iter : chainer.iterators.Iterator
        Training iterator.
    val_iter : chainer.iterators.Iterator
        Validation iterator.
    logging_dir_path : str
        Output directory for trainer artifacts.
    num_gpus : int, default 0
        Number of GPUs; only the first GPU (device 0) is actually used.

    Returns
    -------
    chainer.training.Trainer
        Configured trainer.

    Raises
    ------
    Exception
        If the optimizer name is not supported.
    """
    if optimizer_name == "sgd":
        optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)
    elif optimizer_name == "nag":
        optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)
    else:
        raise Exception('Unsupported optimizer: {}'.format(optimizer_name))
    optimizer.setup(net)
    # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )
    # Single-device training only: device 0 when GPUs requested, CPU (-1) otherwise.
    devices = (0,) if num_gpus > 0 else (-1,)
    updater = training.updaters.StandardUpdater(
        iterator=train_iter,
        optimizer=optimizer,
        device=devices[0])
    trainer = training.Trainer(
        updater=updater,
        stop_trigger=(num_epochs, 'epoch'),
        out=logging_dir_path)
    # Iteration-based triggers for validation/snapshots and for log reports.
    val_interval = 100000, 'iteration'
    log_interval = 1000, 'iteration'
    trainer.extend(
        extension=extensions.Evaluator(
            val_iter,
            net,
            device=devices[0]),
        trigger=val_interval)
    trainer.extend(extensions.dump_graph('main/loss'))
    trainer.extend(extensions.snapshot(), trigger=val_interval)
    trainer.extend(
        extensions.snapshot_object(
            net,
            'model_iter_{.updater.iteration}'),
        trigger=val_interval)
    trainer.extend(extensions.LogReport(trigger=log_interval))
    trainer.extend(extensions.observe_lr(), trigger=log_interval)
    trainer.extend(
        extensions.PrintReport([
            'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
            'lr']),
        trigger=log_interval)
    trainer.extend(extensions.ProgressBar(update_interval=10))
    return trainer
def save_params(file_stem,
                net,
                trainer):
    """
    Save model weights ('.npz') and trainer state ('.states') side by side.

    Parameters
    ----------
    file_stem : str
        Path prefix for the output files (extension is appended).
    net : chainer.Chain
        Model whose parameters are saved.
    trainer : chainer.training.Trainer
        Trainer whose state is saved.
    """
    for extension, target in (('.npz', net), ('.states', trainer)):
        save_npz(
            file=file_stem + extension,
            obj=target)
def main():
    """
    Script entry point: parse arguments, set up logging and the device,
    build the model and data iterators, then run training.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    num_gpus = args.num_gpus
    if num_gpus > 0:
        # Only the first GPU is used (see prepare_trainer).
        cuda.get_device(0).use()
    batch_size = args.batch_size
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        num_gpus=num_gpus)
    train_iter, val_iter = get_data_iterators(
        batch_size=batch_size,
        num_workers=args.num_workers)
    trainer = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        lr=args.lr,
        momentum=args.momentum,
        num_epochs=args.num_epochs,
        train_iter=train_iter,
        val_iter=val_iter,
        logging_dir_path=args.save_dir,
        num_gpus=num_gpus)
    # Checkpoint saving via TrainLogParamSaver is currently disabled.
    # if args.save_dir and args.save_interval:
    #     lp_saver = TrainLogParamSaver(
    #         checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
    #         last_checkpoint_file_name_suffix="last",
    #         best_checkpoint_file_name_suffix=None,
    #         last_checkpoint_dir_path=args.save_dir,
    #         best_checkpoint_dir_path=None,
    #         last_checkpoint_file_count=2,
    #         best_checkpoint_file_count=2,
    #         checkpoint_file_save_callback=save_params,
    #         checkpoint_file_exts=['.npz', '.states'],
    #         save_interval=args.save_interval,
    #         num_epochs=args.num_epochs,
    #         param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],
    #         acc_ind=2,
    #         # bigger=[True],
    #         # mask=None,
    #         score_log_file_path=os.path.join(args.save_dir, 'score.log'),
    #         score_log_attempt_value=args.attempt,
    #         best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    # else:
    #     lp_saver = None
    trainer.run()


if __name__ == '__main__':
    main()
| 9,190 | 28.744337 | 115 | py |
imgclsmob | imgclsmob-master/other/chainer_/seg_utils1.py | """
Segmentation datasets (VOC2012/ADE20K/Cityscapes/COCO) routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_test_dataset', 'get_metainfo', 'SegPredictor']
import numpy as np
import chainer
from chainer import Chain
from chainer_.datasets.voc_seg_dataset import VOCSegDataset
from chainer_.datasets.ade20k_seg_dataset import ADE20KSegDataset
from chainer_.datasets.cityscapes_seg_dataset import CityscapesSegDataset
from chainer_.datasets.coco_seg_dataset import CocoSegDataset
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Add segmentation-dataset-specific command line arguments.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        Parser to populate.
    dataset_name : str
        Dataset name: VOC, ADE20K, Cityscapes or COCO.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    # Per-dataset (data-dir default, data-dir help text, number of classes).
    dataset_params = {
        "VOC": ('../imgclsmob_data/voc', 'path to directory with Pascal VOC2012 dataset', 21),
        "ADE20K": ('../imgclsmob_data/ade20k', 'path to directory with ADE20K dataset', 150),
        "Cityscapes": ('../imgclsmob_data/cityscapes', 'path to directory with Cityscapes dataset', 19),
        "COCO": ('../imgclsmob_data/coco', 'path to directory with COCO dataset', 21),
    }
    if dataset_name not in dataset_params:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    data_dir_default, data_dir_help, num_classes_default = dataset_params[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=data_dir_default,
        help=data_dir_help)
    parser.add_argument(
        '--num-classes',
        type=int,
        default=num_classes_default,
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
    parser.add_argument(
        '--image-base-size',
        type=int,
        default=520,
        help='base image size')
    parser.add_argument(
        '--image-crop-size',
        type=int,
        default=480,
        help='crop image size')
class SegPredictor(Chain):
    """
    Inference wrapper for a semantic segmentation model.

    Converts HWC uint8-style images to normalized CHW float tensors (ImageNet
    mean/std by default) and runs the wrapped model in test mode with no
    backprop graph.

    Parameters
    ----------
    base_model : chainer.Chain
        Model to wrap for prediction.
    mean : tuple of 3 float
        Per-channel normalization means.
    std : tuple of 3 float
        Per-channel normalization standard deviations.
    """
    def __init__(self,
                 base_model,
                 mean=(0.485, 0.456, 0.406),
                 std=(0.229, 0.224, 0.225)):
        super(SegPredictor, self).__init__()
        # Reshape stats to (C, 1, 1) so they broadcast over (C, H, W) images.
        self.mean = np.array(mean, np.float32)[:, np.newaxis, np.newaxis]
        self.std = np.array(std, np.float32)[:, np.newaxis, np.newaxis]
        with self.init_scope():
            self.model = base_model

    def _preprocess(self, img):
        # HWC -> CHW, scale from [0, 255] to [0, 1], then normalize.
        dtype = chainer.get_dtype(None)
        img = img.transpose(2, 0, 1)
        img = img.astype(dtype)
        img *= 1.0 / 255.0
        img -= self.mean
        img /= self.std
        return img

    def predict(self, imgs):
        """
        Run inference on a batch of HWC images.

        Returns
        -------
        numpy.ndarray
            Raw per-class model outputs, moved to CPU memory (argmax is left
            to the caller).
        """
        imgs = self.xp.asarray([self._preprocess(img) for img in imgs])
        with chainer.using_config("train", False), chainer.function.no_backprop_mode():
            imgs = chainer.Variable(imgs)
            predictions = self.model(imgs)
            output = chainer.backends.cuda.to_cpu(predictions.array)
        # output = np.argmax(output, axis=1).astype(np.int32)
        return output
def get_metainfo(dataset_name):
    """
    Get segmentation metadata (vague/background index handling) for a dataset.

    Parameters
    ----------
    dataset_name : str
        Dataset name: VOC, ADE20K, Cityscapes or COCO.

    Returns
    -------
    dict
        Keys: 'vague_idx', 'use_vague', 'background_idx', 'ignore_bg'.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset_class = dataset_classes[dataset_name]
    return {
        "vague_idx": dataset_class.vague_idx,
        "use_vague": dataset_class.use_vague,
        "background_idx": dataset_class.background_idx,
        "ignore_bg": dataset_class.ignore_bg}
def get_test_dataset(dataset_name,
                     dataset_dir):
    """
    Instantiate the test split of a segmentation dataset.

    Parameters
    ----------
    dataset_name : str
        Dataset name: VOC, ADE20K, Cityscapes or COCO.
    dataset_dir : str
        Path to the dataset root directory.

    Returns
    -------
    Dataset
        Test-mode dataset instance without transforms.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    return dataset_classes[dataset_name](
        root=dataset_dir,
        mode="test",
        transform=None)
| 5,374 | 31.575758 | 94 | py |
imgclsmob | imgclsmob-master/other/chainer_/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/other/datasets/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/other/gluon/seg_utils1.py | """
Segmentation datasets (VOC2012/ADE20K/Cityscapes/COCO) routines.
"""
__all__ = ['add_dataset_parser_arguments', 'batch_fn', 'get_test_data_source', 'get_num_training_samples', 'validate1',
'get_metainfo']
from tqdm import tqdm
from mxnet import gluon
from mxnet.gluon.data.vision import transforms
from gluon.datasets.voc_seg_dataset import VOCSegDataset
from gluon.datasets.ade20k_seg_dataset import ADE20KSegDataset
from gluon.datasets.cityscapes_seg_dataset import CityscapesSegDataset
from gluon.datasets.coco_seg_dataset import CocoSegDataset
# from gluoncv.data.mscoco.segmentation import COCOSegmentation
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Add segmentation-dataset-specific command line arguments.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        Parser to populate.
    dataset_name : str
        Dataset name: VOC, ADE20K, Cityscapes or COCO.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    # Per-dataset (data-dir default, data-dir help text, number of classes).
    dataset_params = {
        "VOC": ('../imgclsmob_data/voc', 'path to directory with Pascal VOC2012 dataset', 21),
        "ADE20K": ('../imgclsmob_data/ade20k', 'path to directory with ADE20K dataset', 150),
        "Cityscapes": ('../imgclsmob_data/cityscapes', 'path to directory with Cityscapes dataset', 19),
        "COCO": ('../imgclsmob_data/coco', 'path to directory with COCO dataset', 21),
    }
    if dataset_name not in dataset_params:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    data_dir_default, data_dir_help, num_classes_default = dataset_params[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=data_dir_default,
        help=data_dir_help)
    parser.add_argument(
        '--num-classes',
        type=int,
        default=num_classes_default,
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
    parser.add_argument(
        '--image-base-size',
        type=int,
        default=520,
        help='base image size')
    parser.add_argument(
        '--image-crop-size',
        type=int,
        default=480,
        help='crop image size')
def batch_fn(batch, ctx):
    """
    Split a (data, label) batch across the given MXNet contexts.

    Parameters
    ----------
    batch : sequence
        Batch where batch[0] is data and batch[1] is labels.
    ctx : list of mx.Context
        Target contexts.

    Returns
    -------
    tuple of two lists
        Per-context data chunks and per-context label chunks.
    """
    data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
    label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
    return data, label
def get_num_training_samples(dataset_name):
    """
    Get the number of training samples for a dataset.

    Only ADE20K is handled, and its count is not recorded here (None is
    returned).

    Raises
    ------
    Exception
        If `dataset_name` is not ADE20K.
    """
    if dataset_name != "ADE20K":
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    return None
def get_metainfo(dataset_name):
    """
    Get segmentation metadata (vague/background index handling) for a dataset.

    Parameters
    ----------
    dataset_name : str
        Dataset name: VOC, ADE20K, Cityscapes or COCO.

    Returns
    -------
    dict
        Keys: 'vague_idx', 'use_vague', 'background_idx', 'ignore_bg'.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset_class = dataset_classes[dataset_name]
    return {
        "vague_idx": dataset_class.vague_idx,
        "use_vague": dataset_class.use_vague,
        "background_idx": dataset_class.background_idx,
        "ignore_bg": dataset_class.ignore_bg}
def get_test_data_source(dataset_name,
                         dataset_dir,
                         batch_size,
                         num_workers):
    """
    Create a DataLoader over the test split of a segmentation dataset with
    ImageNet-style normalization.

    Parameters
    ----------
    dataset_name : str
        Dataset name: VOC, ADE20K, Cityscapes or COCO.
    dataset_dir : str
        Path to the dataset root directory.
    batch_size : int
        Batch size.
    num_workers : int
        Number of loader worker processes.

    Returns
    -------
    gluon.data.DataLoader
        Unshuffled test data loader.

    Raises
    ------
    Exception
        If `dataset_name` is not one of the supported datasets.
    """
    mean_rgb = (0.485, 0.456, 0.406)
    std_rgb = (0.229, 0.224, 0.225)
    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ])
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset = dataset_classes[dataset_name](
        root=dataset_dir,
        mode="test",
        transform=transform_val)
    return gluon.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers)
def validate1(accuracy_metric,
              net,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              ctx):
    """
    Evaluate a segmentation model over a validation data source.

    Parameters
    ----------
    accuracy_metric : metric object
        Metric with reset()/update()/get(); it is reset before evaluation.
    net : network
        Model to evaluate.
    val_data : iterable
        Validation data source.
    batch_fn : callable
        Splits a batch into per-context data and label lists.
    data_source_needs_reset : bool
        Whether `val_data` must be reset before iteration.
    dtype : str or numpy dtype
        Dtype the inputs are cast to before the forward pass.
    ctx : list of contexts
        Compute contexts.

    Returns
    -------
    Metric name/value info as returned by `accuracy_metric.get()`.
    """
    if data_source_needs_reset:
        val_data.reset()
    accuracy_metric.reset()
    for batch in tqdm(val_data):
        data_list, labels_list = batch_fn(batch, ctx)
        outputs_list = [net(X.astype(dtype, copy=False)) for X in data_list]
        accuracy_metric.update(labels_list, outputs_list)
    accuracy_info = accuracy_metric.get()
    return accuracy_info
| 5,809 | 31.640449 | 119 | py |
imgclsmob | imgclsmob-master/other/gluon/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/other/gluon/khpa/khpa_utils.py | """
KHPA dataset routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_batch_fn', 'get_train_data_source', 'get_val_data_source', 'validate']
import math
from mxnet import gluon
from gluon.weighted_random_sampler import WeightedRandomSampler
from other.gluon.khpa.khpa_cls_dataset import KHPA
def add_dataset_parser_arguments(parser):
    """
    Add KHPA-dataset-specific command line arguments to the parser.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        Parser to populate.
    """
    parser.add_argument(
        '--data-path',
        type=str,
        default='../imgclsmob_data/khpa',
        help='path to KHPA dataset')
    parser.add_argument(
        '--split-file',
        type=str,
        default='../imgclsmob_data/khpa/split.csv',
        help='path to file with splitting training subset on training and validation ones')
    parser.add_argument(
        '--gen-split',
        action='store_true',
        help='whether generate split file')
    parser.add_argument(
        '--num-split-folders',
        type=int,
        default=10,
        help='number of folders for validation subsets')
    parser.add_argument(
        '--stats-file',
        type=str,
        default='../imgclsmob_data/khpa/stats.json',
        help='path to file with the dataset statistics')
    parser.add_argument(
        '--gen-stats',
        action='store_true',
        help='whether generate a file with the dataset statistics')
    parser.add_argument(
        '--input-size',
        type=int,
        default=224,
        help='size of the input for model')
    parser.add_argument(
        '--resize-inv-factor',
        type=float,
        default=0.875,
        help='inverted ratio for input image crop')
    # NOTE(review): 56 classes = 28 KHPA classes x 2 (per-class binary one-hot
    # pairs, see validate's reshape to (-1, 2)) -- confirm.
    parser.add_argument(
        '--num-classes',
        type=int,
        default=56,
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=4,
        help='number of input channels')
def get_batch_fn():
    """
    Build a batch-splitting function for multi-context training.

    Returns
    -------
    callable
        Function mapping (batch, ctx) to per-context (data, label) lists.
    """
    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
        # Per-sample weights (batch[2]) are currently unused:
        # weight = gluon.utils.split_and_load(batch[2].astype(np.float32, copy=False), ctx_list=ctx, batch_axis=0)
        return data, label
    return batch_fn
def get_train_data_loader(data_dir_path,
                          split_file_path,
                          generate_split,
                          num_split_folders,
                          stats_file_path,
                          generate_stats,
                          batch_size,
                          num_workers,
                          model_input_image_size):
    """
    Create a training DataLoader for KHPA using weighted random sampling
    (weights come from the dataset's per-sample class-frequency weights).

    Returns
    -------
    gluon.data.DataLoader
        Training loader; incomplete final batches are discarded.
    """
    dataset = KHPA(
        root=data_dir_path,
        split_file_path=split_file_path,
        generate_split=generate_split,
        num_split_folders=num_split_folders,
        stats_file_path=stats_file_path,
        generate_stats=generate_stats,
        model_input_image_size=model_input_image_size,
        train=True)
    sampler = WeightedRandomSampler(
        length=len(dataset),
        weights=dataset.sample_weights)
    return gluon.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        # shuffle=True,  # replaced by the weighted sampler below
        sampler=sampler,
        last_batch="discard",
        num_workers=num_workers)
def get_val_data_loader(data_dir_path,
                        split_file_path,
                        generate_split,
                        num_split_folders,
                        stats_file_path,
                        generate_stats,
                        batch_size,
                        num_workers,
                        model_input_image_size,
                        preproc_resize_image_size):
    """
    Create a validation DataLoader for KHPA (no shuffling, resize-then-crop
    preprocessing).

    Returns
    -------
    gluon.data.DataLoader
        Validation loader.
    """
    return gluon.data.DataLoader(
        dataset=KHPA(
            root=data_dir_path,
            split_file_path=split_file_path,
            generate_split=generate_split,
            num_split_folders=num_split_folders,
            stats_file_path=stats_file_path,
            generate_stats=generate_stats,
            preproc_resize_image_size=preproc_resize_image_size,
            model_input_image_size=model_input_image_size,
            train=False),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers)
def get_train_data_source(dataset_args,
                          batch_size,
                          num_workers,
                          input_image_size=(224, 224)):
    """
    Convenience wrapper: build the KHPA training loader from parsed
    command-line dataset arguments.

    Returns
    -------
    gluon.data.DataLoader
        Training loader.
    """
    return get_train_data_loader(
        data_dir_path=dataset_args.data_path,
        split_file_path=dataset_args.split_file,
        generate_split=dataset_args.gen_split,
        num_split_folders=dataset_args.num_split_folders,
        stats_file_path=dataset_args.stats_file,
        generate_stats=dataset_args.gen_stats,
        batch_size=batch_size,
        num_workers=num_workers,
        model_input_image_size=input_image_size)
def get_val_data_source(dataset_args,
                        batch_size,
                        num_workers,
                        input_image_size=(224, 224),
                        resize_inv_factor=0.875):
    """
    Convenience wrapper: build the KHPA validation loader from parsed
    command-line dataset arguments.

    Parameters
    ----------
    dataset_args : argparse.Namespace
        Parsed dataset arguments (data_path, split_file, ...).
    batch_size : int
        Batch size.
    num_workers : int
        Number of loader worker processes.
    input_image_size : int or tuple of 2 int, default (224, 224)
        Model input size; an int is expanded to a square size.
    resize_inv_factor : float, default 0.875
        Inverted crop ratio; the preprocessing resize value is
        ceil(input_size / resize_inv_factor).

    Returns
    -------
    gluon.data.DataLoader
        Validation loader.

    Raises
    ------
    ValueError
        If `resize_inv_factor` is not positive.
    """
    if resize_inv_factor <= 0.0:
        # `assert` is stripped under `python -O`; validate explicitly instead.
        raise ValueError("resize_inv_factor must be positive, got {}".format(resize_inv_factor))
    if isinstance(input_image_size, int):
        input_image_size = (input_image_size, input_image_size)
    resize_value = int(math.ceil(float(input_image_size[0]) / resize_inv_factor))
    return get_val_data_loader(
        data_dir_path=dataset_args.data_path,
        split_file_path=dataset_args.split_file,
        generate_split=dataset_args.gen_split,
        num_split_folders=dataset_args.num_split_folders,
        stats_file_path=dataset_args.stats_file,
        generate_stats=dataset_args.gen_stats,
        batch_size=batch_size,
        num_workers=num_workers,
        model_input_image_size=input_image_size,
        preproc_resize_image_size=resize_value)
def validate(metric_calc,
             net,
             val_data,
             batch_fn,
             data_source_needs_reset,
             dtype,
             ctx):
    """
    Evaluate a KHPA model over a validation data source.

    Model outputs are reshaped to (-1, 2) per-label binary predictions and
    labels are flattened to match before updating the metric.

    Parameters
    ----------
    metric_calc : metric object
        Metric with reset()/update(src_pts, dst_pts)/get().
    net : network
        Model to evaluate.
    val_data : iterable
        Validation data source.
    batch_fn : callable
        Splits a batch into per-context data and label lists.
    data_source_needs_reset : bool
        Whether `val_data` must be reset before iteration.
    dtype : str or numpy dtype
        Dtype the inputs are cast to before the forward pass.
    ctx : list of contexts
        Compute contexts.

    Returns
    -------
    Metric name/value info as returned by `metric_calc.get()`.
    """
    if data_source_needs_reset:
        val_data.reset()
    metric_calc.reset()
    for batch in val_data:
        data_list, labels_list = batch_fn(batch, ctx)
        # reshape(0, -1, 2): keep batch axis, view outputs as binary pairs per label.
        onehot_outputs_list = [net(X.astype(dtype, copy=False)).reshape(0, -1, 2) for X in data_list]
        labels_list_ = [Y.reshape(-1,) for Y in labels_list]
        onehot_outputs_list_ = [Y.reshape(-1, 2) for Y in onehot_outputs_list]
        metric_calc.update(
            src_pts=labels_list_,
            dst_pts=onehot_outputs_list_)
    metric_name_value = metric_calc.get()
    return metric_name_value
| 6,499 | 33.210526 | 118 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/khpa_cls_dataset.py | """
KHPA classification dataset.
"""
import os
import json
import logging
import numpy as np
import pandas as pd
import mxnet as mx
from mxnet.gluon.data import Dataset
from imgaug import augmenters as iaa
from imgaug import parameters as iap
class KHPA(Dataset):
"""
Load the KHPA classification dataset.
Parameters:
----------
root : str, default '~/.mxnet/datasets/imagenet'
Path to the folder stored the dataset.
train : bool, default True
Whether to load the training or validation set.
"""
def __init__(self,
root=os.path.join("~", ".mxnet", "datasets", "khpa"),
split_file_path=os.path.join("~", ".mxnet", "datasets", "khpa", "split.csv"),
generate_split=False,
num_split_folders=10,
working_split_folder_ind1=1,
stats_file_path=os.path.join("~", ".mxnet", "datasets", "khpa", "stats.json"),
generate_stats=False,
num_classes=28,
preproc_resize_image_size=(256, 256),
model_input_image_size=(224, 224),
train=True):
super(KHPA, self).__init__()
self.suffices = ("red", "green", "blue", "yellow")
root_dir_path = os.path.expanduser(root)
assert os.path.exists(root_dir_path)
train_file_name = "train.csv"
train_file_path = os.path.join(root_dir_path, train_file_name)
if not os.path.exists(train_file_path):
raise Exception("Train file doesn't exist: {}".format(train_file_path))
images_dir_path = os.path.join(root_dir_path, "train")
if not os.path.exists(images_dir_path):
raise Exception("Train image directory doesn't exist: {}".format(images_dir_path))
train_df = pd.read_csv(
train_file_path,
sep=",",
index_col=False,
dtype={"Id": np.unicode, "Target": np.unicode})
train_file_ids = train_df["Id"].values.astype(np.unicode)
train_file_labels = train_df["Target"].values.astype(np.unicode)
image_count = len(train_file_ids)
if os.path.exists(split_file_path):
if generate_split:
logging.info("Split file already exists: {}".format(split_file_path))
slice_df = pd.read_csv(
split_file_path,
sep=",",
index_col=False,
)
categories = slice_df["Folder{}".format(working_split_folder_ind1)].values.astype(np.uint8)
else:
if not generate_split:
raise Exception("Split file doesn't exist: {}".format(split_file_path))
label_position_lists, label_counts = self.calc_label_position_lists(
train_file_labels=train_file_labels,
num_classes=num_classes)
assert (num_split_folders <= label_counts.min())
unique_label_position_lists, unique_label_counts = self.calc_unique_label_position_lists(
label_position_lists=label_position_lists,
label_counts=label_counts)
assert (image_count == unique_label_counts.sum())
dataset_folder_table = self.create_dataset_folder_table(
num_samples=image_count,
num_folders=num_split_folders,
unique_label_position_lists=unique_label_position_lists)
assert (image_count == dataset_folder_table.sum())
slice_df_dict = {"Id": train_file_ids}
slice_df_dict.update({"Folder{}".format(i + 1): dataset_folder_table[i]
for i in range(num_split_folders)})
slice_df = pd.DataFrame(slice_df_dict)
slice_df.to_csv(
split_file_path,
sep=',',
index=False)
categories = slice_df["Folder{}".format(working_split_folder_ind1)].values.astype(np.uint8)
if os.path.exists(stats_file_path):
if generate_stats:
logging.info("Stats file already exists: {}".format(stats_file_path))
with open(stats_file_path, "r") as f:
stats_dict = json.load(f)
mean_rgby = np.array(stats_dict["mean_rgby"], np.float32)
std_rgby = np.array(stats_dict["std_rgby"], np.float32)
label_counts = np.array(stats_dict["label_counts"], np.int32)
else:
if not generate_split:
raise Exception("Stats file doesn't exist: {}".format(stats_file_path))
label_counts = self.calc_label_counts(train_file_labels, num_classes)
mean_rgby, std_rgby = self.calc_image_widths(train_file_ids, self.suffices, images_dir_path)
stats_dict = {
"mean_rgby": [float(x) for x in mean_rgby],
"std_rgby": [float(x) for x in std_rgby],
"label_counts": [int(x) for x in label_counts],
}
with open(stats_file_path, 'w') as f:
json.dump(stats_dict, f)
self.label_widths = self.calc_label_widths(label_counts, num_classes)
self.mean_rgby = mean_rgby
self.std_rgby = std_rgby
mask = (categories == (0 if train else 1))
self.train_file_ids = train_file_ids[mask]
list_labels = train_file_labels[mask]
self.images_dir_path = images_dir_path
self.num_classes = num_classes
self.train = train
self.onehot_labels = self.calc_onehot_labels(
num_classes=num_classes,
list_labels=list_labels)
if train:
self._transform = KHPATrainTransform(
mean=self.mean_rgby,
std=self.std_rgby,
crop_image_size=model_input_image_size)
self.sample_weights = self.calc_sample_weights(
label_widths=self.label_widths,
list_labels=list_labels)
else:
self._transform = KHPAValTransform(
mean=self.mean_rgby,
std=self.std_rgby,
resize_image_size=preproc_resize_image_size,
crop_image_size=model_input_image_size)
def __str__(self):
return self.__class__.__name__ + "({})".format(len(self.train_file_ids))
    def __len__(self):
        # Number of samples in the selected (train or validation) split.
        return len(self.train_file_ids)
    def __getitem__(self, idx):
        """
        Load the 4-channel (red/green/blue/yellow) image and its one-hot
        label for sample `idx`, applying the split-specific transform.

        Each sample is stored as four separate grayscale PNGs (one per stain
        channel) that are concatenated along the channel axis.
        """
        image_prefix = self.train_file_ids[idx]
        image_prefix_path = os.path.join(self.images_dir_path, image_prefix)
        imgs = []
        for suffix in self.suffices:
            image_file_path = "{}_{}.png".format(image_prefix_path, suffix)
            img = mx.image.imread(image_file_path, flag=0)  # flag=0 -> grayscale
            imgs += [img]
        img = mx.nd.concat(*imgs, dim=2)
        label = mx.nd.array(self.onehot_labels[idx])
        if self._transform is not None:
            img, label = self._transform(img, label)
        return img, label
@staticmethod
def calc_onehot_labels(num_classes, list_labels):
num_samples = len(list_labels)
onehot_labels = np.zeros((num_samples, num_classes), np.int32)
for i, train_file_label in enumerate(list_labels):
label_str_list = train_file_label.split()
for label_str in label_str_list:
label_int = int(label_str)
onehot_labels[i, label_int] = 1
return onehot_labels
    @staticmethod
    def calc_sample_weights(label_widths, list_labels):
        """
        Compute a sampling weight for every sample from per-class widths.

        Each sample's weight is the maximum of the normalized widths of its
        labels (rarer classes have larger widths), and the result is
        normalized to sum to one for use as a sampling distribution.
        """
        label_widths1 = label_widths / label_widths.sum()
        num_samples = len(list_labels)
        sample_weights = np.zeros((num_samples, ), np.float64)
        for i, train_file_label in enumerate(list_labels):
            label_str_list = train_file_label.split()
            for label_str in label_str_list:
                label_int = int(label_str)
                # Max over labels (sum variant kept for reference):
                # sample_weights[i] += label_widths1[label_int]
                sample_weights[i] = max(sample_weights[i], label_widths1[label_int])
        assert (sample_weights.min() > 0.0)
        sample_weights /= sample_weights.sum()
        sample_weights = sample_weights.astype(np.float32)
        return sample_weights
    @staticmethod
    def calc_label_position_lists(train_file_labels, num_classes):
        """
        For each class, collect the indices of samples carrying it.

        Returns
        -------
        list of list of int
            Per-class sample-index lists (a multi-label sample appears in
            several lists).
        numpy.ndarray
            Per-class occurrence counts (int32, length num_classes).
        """
        label_counts = np.zeros((num_classes, ), np.int32)
        label_position_lists = [[] for _ in range(num_classes)]
        for sample_ind, train_file_label in enumerate(train_file_labels):
            label_str_list = train_file_label.split()
            for label_str in label_str_list:
                label_int = int(label_str)
                assert (0 <= label_int < num_classes)
                label_counts[label_int] += 1
                label_position_lists[label_int] += [sample_ind]
        assert ([len(x) for x in label_position_lists] == list(label_counts))
        return label_position_lists, label_counts
    @staticmethod
    def calc_unique_label_position_lists(label_position_lists, label_counts):
        """
        Deduplicate multi-label samples so each sample appears in exactly
        one class list.

        Classes are processed from rarest to most frequent; a sample kept by
        a rarer class is removed from every more frequent class's list, so
        each sample ends up attributed to its rarest class.

        Returns
        -------
        list of list of int
            Disjoint per-class sample-index lists.
        numpy.ndarray
            Updated per-class counts matching the deduplicated lists.
        """
        # Shallow copy: the inner lists are mutated in place below.
        unique_label_position_lists = label_position_lists.copy()
        unique_label_counts = label_counts.copy()
        order_inds = np.argsort(label_counts)
        for i, class_ind_i in enumerate(order_inds):
            for sample_ind in unique_label_position_lists[class_ind_i]:
                for class_ind_k in order_inds[(i + 1):]:
                    if sample_ind in unique_label_position_lists[class_ind_k]:
                        unique_label_position_lists[class_ind_k].remove(sample_ind)
                        unique_label_counts[class_ind_k] -= 1
        assert ([len(x) for x in unique_label_position_lists] == list(unique_label_counts))
        return unique_label_position_lists, unique_label_counts
@staticmethod
def create_dataset_folder_table(num_samples, num_folders, unique_label_position_lists):
dataset_folder_table = np.zeros((num_folders, num_samples), np.uint8)
for label_position_list in unique_label_position_lists:
label_positions = np.array(label_position_list)
np.random.shuffle(label_positions)
split_list = np.array_split(label_positions, indices_or_sections=num_folders)
for folder_ind, folder_split_list in enumerate(split_list):
dataset_folder_table[folder_ind, folder_split_list] = 1
return dataset_folder_table
@staticmethod
def calc_label_counts(train_file_labels, num_classes):
label_counts = np.zeros((num_classes, ), np.int32)
for train_file_label in train_file_labels:
label_str_list = train_file_label.split()
for label_str in label_str_list:
label_int = int(label_str)
assert (0 <= label_int < num_classes)
label_counts[label_int] += 1
return label_counts
@staticmethod
def calc_label_widths(label_counts, num_classes):
total_label_count = label_counts.sum()
label_widths = (1.0 / label_counts) / num_classes * total_label_count
return label_widths
    @staticmethod
    def calc_image_widths(train_file_ids, suffices, images_dir_path):
        """
        Compute per-channel pixel mean and standard deviation over the whole
        training set, one pass per channel suffix.

        NOTE(review): despite its name, this returns (mean, std) statistics,
        not widths -- presumably a leftover name; confirm before renaming.

        Parameters:
        ----------
        train_file_ids : list of str
            Image file name prefixes.
        suffices : list of str
            Per-channel file name suffixes appended as '<prefix>_<suffix>.png'.
        images_dir_path : str
            Directory containing the image files.

        Returns:
        -------
        tuple of (np.array, np.array)
            Per-channel float32 means and standard deviations.
        """
        logging.info("Calculating image widths...")
        mean_rgby = np.zeros((len(suffices),), np.float32)
        std_rgby = np.zeros((len(suffices),), np.float32)
        for i, suffix in enumerate(suffices):
            logging.info("Processing suffix: {}".format(suffix))
            imgs = []
            for image_prefix in train_file_ids:
                image_prefix_path = os.path.join(images_dir_path, image_prefix)
                image_file_path = "{}_{}.png".format(image_prefix_path, suffix)
                # flag=0 -> greyscale read: each suffix file is one image plane.
                img = mx.image.imread(image_file_path, flag=0).asnumpy()
                imgs += [img]
            imgs = np.concatenate(tuple(imgs), axis=2).flatten()
            mean_rgby[i] = imgs.mean()
            # In-place float conversion/centering to avoid a second huge copy.
            imgs = imgs.astype(np.float32, copy=False)
            imgs -= mean_rgby[i]
            imgs **= 2
            # Bessel-corrected (unbiased) standard deviation.
            std = np.sqrt(imgs.mean() * len(imgs) / (len(imgs) - 1))
            std_rgby[i] = std
            logging.info("i={}, mean={}, std={}".format(i, mean_rgby[i], std_rgby[i]))
        return mean_rgby, std_rgby
class KHPATrainTransform(object):
    """
    Training-time transform for 4-channel KHPA images: imgaug augmentation
    (applied identically to the RGB planes and the extra plane), mean/std
    normalization, random-size crop and HWC -> CHW transposition.

    Parameters:
    ----------
    mean : tuple of 4 float, default all zeros
        Per-channel means subtracted after augmentation.
    std : tuple of 4 float, default all ones
        Per-channel standard deviations for normalization.
    crop_image_size : int or tuple of (int, int), default (224, 224)
        Output spatial size of the random crop.
    """
    def __init__(self,
                 mean=(0.0, 0.0, 0.0, 0.0),
                 std=(1.0, 1.0, 1.0, 1.0),
                 crop_image_size=(224, 224)):
        if isinstance(crop_image_size, int):
            crop_image_size = (crop_image_size, crop_image_size)
        self._mean = mean
        self._std = std
        self.crop_image_size = crop_image_size

        # Geometric distortions: affine always, piecewise-affine rarely.
        geom_seq = iaa.Sequential(
            children=[
                iaa.Affine(
                    scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
                    translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                    rotate=(-45, 45),
                    shear=(-16, 16),
                    order=iap.Choice([0, 1, 3], p=[0.15, 0.80, 0.05]),
                    mode="reflect",
                    name="Affine"),
                iaa.Sometimes(
                    p=0.01,
                    then_list=iaa.PiecewiseAffine(
                        scale=(0.0, 0.01),
                        nb_rows=(4, 20),
                        nb_cols=(4, 20),
                        order=iap.Choice([0, 1, 3], p=[0.15, 0.80, 0.05]),
                        mode="reflect",
                        name="PiecewiseAffine"))],
            random_order=True,
            name="GeomTransform")
        # Photometric distortions.
        color_seq = iaa.Sequential(
            children=[
                iaa.Sometimes(
                    p=0.75,
                    then_list=iaa.Add(value=(-10, 10), per_channel=0.5, name="Brightness")),
                iaa.Sometimes(
                    p=0.05,
                    then_list=iaa.Emboss(alpha=(0.0, 0.5), strength=(0.5, 1.2), name="Emboss")),
                iaa.Sometimes(
                    p=0.1,
                    then_list=iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.5, 1.2), name="Sharpen")),
                iaa.Sometimes(
                    p=0.25,
                    then_list=iaa.ContrastNormalization(alpha=(0.5, 1.5), per_channel=0.5,
                                                        name="ContrastNormalization"))],
            random_order=True,
            name="ColorTransform")
        # Additive noise.
        noise_seq = iaa.Sequential(
            children=[
                iaa.Sometimes(
                    p=0.5,
                    then_list=iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 10.0), per_channel=0.5,
                                                        name="AdditiveGaussianNoise")),
                iaa.Sometimes(
                    p=0.1,
                    then_list=iaa.SaltAndPepper(p=(0, 0.001), per_channel=0.5, name="SaltAndPepper"))],
            random_order=True,
            name="Noise")
        # At most one blur variant per image.
        blur_choice = iaa.OneOf(
            children=[
                iaa.Sometimes(p=0.05, then_list=iaa.MedianBlur(k=3, name="MedianBlur")),
                iaa.Sometimes(p=0.05, then_list=iaa.AverageBlur(k=(2, 4), name="AverageBlur")),
                iaa.Sometimes(p=0.5, then_list=iaa.GaussianBlur(sigma=(0.0, 2.0), name="GaussianBlur"))],
            name="Blur")
        self.seq = iaa.Sequential(
            children=[
                iaa.Sequential(
                    children=[
                        iaa.Fliplr(p=0.5, name="Fliplr"),
                        iaa.Flipud(p=0.5, name="Flipud"),
                        geom_seq,
                        color_seq,
                        noise_seq,
                        blur_choice],
                    random_order=True,
                    name="MainProcess")])

    def __call__(self, img, label):
        """
        Augment, normalize and crop one sample.

        Parameters:
        ----------
        img : mx.nd.NDArray
            HWC image with 4 channels.
        label : mx.nd.NDArray
            Sample label (passed through unchanged).

        Returns:
        -------
        tuple of (mx.nd.NDArray, mx.nd.NDArray)
            CHW float image and the unchanged label.
        """
        # Freeze the stochastic pipeline so both channel groups get the exact
        # same augmentation (imgaug handles at most 3 channels per call).
        seq_det = self.seq.to_deterministic()
        aug = img.asnumpy().copy()
        aug[:, :, :3] = seq_det.augment_image(aug[:, :, :3])
        aug[:, :, 3:] = seq_det.augment_image(aug[:, :, 3:])
        aug = aug.astype(np.float32)
        aug = (aug - self._mean) / self._std
        out = mx.nd.array(aug, ctx=img.context)
        out = mx.image.random_size_crop(
            src=out,
            size=self.crop_image_size,
            area=(0.08, 1.0),
            ratio=(3.0 / 4.0, 4.0 / 3.0),
            interp=1)[0]
        out = out.transpose((2, 0, 1))
        return out, label
class KHPAValTransform(object):
    """
    Validation-time transform: resize the shorter image side, center-crop,
    normalize and transpose HWC -> CHW.

    Parameters:
    ----------
    mean : tuple of 4 float, default all zeros
        Per-channel means subtracted after resizing/cropping.
    std : tuple of 4 float, default all ones
        Per-channel standard deviations for normalization.
    resize_image_size : int or tuple of (int, int), default (256, 256)
        Target length of the shorter image side.
    crop_image_size : int or tuple of (int, int), default (224, 224)
        Output spatial size of the center crop.
    """
    def __init__(self,
                 mean=(0.0, 0.0, 0.0, 0.0),
                 std=(1.0, 1.0, 1.0, 1.0),
                 resize_image_size=(256, 256),
                 crop_image_size=(224, 224)):
        if isinstance(crop_image_size, int):
            crop_image_size = (crop_image_size, crop_image_size)
        # Bug fix: `__call__` uses `resize_image_size` as a *scalar* shorter-side
        # length (`int(h * wsize / w)`), but the default value was a tuple, so
        # the arithmetic below raised TypeError. Accept both forms and reduce a
        # tuple to the length of its shorter side.
        if isinstance(resize_image_size, (list, tuple)):
            resize_image_size = min(resize_image_size)
        self._mean = mean
        self._std = std
        self.resize_image_size = resize_image_size
        self.crop_image_size = crop_image_size

    def __call__(self, img, label):
        """
        Resize, crop and normalize one sample.

        Parameters:
        ----------
        img : mx.nd.NDArray
            HWC image.
        label : mx.nd.NDArray
            Sample label (passed through unchanged).

        Returns:
        -------
        tuple of (mx.nd.NDArray, mx.nd.NDArray)
            CHW float image and the unchanged label.
        """
        h, w, _ = img.shape
        # Scale so the shorter side equals `resize_image_size`, keeping aspect.
        if h > w:
            wsize = self.resize_image_size
            hsize = int(h * wsize / w)
        else:
            hsize = self.resize_image_size
            wsize = int(w * hsize / h)
        img = mx.image.imresize(
            src=img,
            w=wsize,
            h=hsize,
            interp=1)
        img = mx.image.center_crop(
            src=img,
            size=self.crop_image_size,
            interp=1)[0]
        img = img.astype(np.float32)
        img = (img - mx.nd.array(self._mean, ctx=img.context)) / mx.nd.array(self._std, ctx=img.context)
        img = img.transpose((2, 0, 1))
        return img, label
class KHPAMetaInfo(object):
    """
    Dataset-wide constants for KHPA (Kaggle Human Protein Atlas).
    """
    label = "KHPA"  # dataset identifier used in logs/paths
    root_dir_name = "khpa"  # sub-directory name under the data root
    dataset_class = KHPA  # dataset implementation class defined above
    num_training_samples = None  # not fixed; determined from the data list at runtime
    in_channels = 4  # four image planes (RGBY) per sample
    num_classes = 56  # presumably 28 binary labels x 2 logits each (training reshapes outputs to (..., 2)) -- confirm
    input_image_size = (224, 224)  # spatial size fed to the network
| 20,192 | 41.511579 | 104 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/eval_gl_khpa.py | import argparse
import time
import logging
import mxnet as mx
from common.logger_utils import initialize_logging
from gluon.utils import prepare_mx_context, prepare_model, calc_net_weight_count
from other.gluon.khpa.khpa_utils import add_dataset_parser_arguments
from other.gluon.khpa.khpa_utils import get_batch_fn
from other.gluon.khpa.khpa_utils import get_val_data_source
from other.gluon.khpa.khpa_utils import validate
def parse_args():
    """
    Parse command-line arguments for KHPA model evaluation.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Evaluate a model for image classification (Gluon/KHPA)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_dataset_parser_arguments(parser)
    parser.add_argument('--model', type=str, required=True,
                        help='type of model to use. see model_provider for options.')
    parser.add_argument('--use-pretrained', action='store_true',
                        help='enable using pretrained model from gluon.')
    parser.add_argument('--dtype', type=str, default='float32',
                        help='data type for training. default is float32')
    parser.add_argument('--resume', type=str, default='',
                        help='resume from previously saved parameters if not None')
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--batch-size', type=int, default=512,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--save-dir', type=str, default='',
                        help='directory of saved models and log-files')
    parser.add_argument('--logging-file-name', type=str, default='train.log',
                        help='filename of training log')
    parser.add_argument('--log-packages', type=str, default='mxnet',
                        help='list of python packages for logging')
    parser.add_argument('--log-pip-packages', type=str, default='mxnet-cu92',
                        help='list of pip packages for logging')
    return parser.parse_args()
def test(net,
         val_data,
         batch_fn,
         data_source_needs_reset,
         dtype,
         ctx,
         calc_weight_count=False,
         extended_log=False):
    """
    Evaluate a trained network on the validation data and log the result.

    Parameters:
    ----------
    net : HybridBlock
        Network to evaluate.
    val_data : DataLoader or ImageRecordIter
        Validation data source.
    batch_fn : func
        Function extracting (data, label) lists from a batch.
    data_source_needs_reset : bool
        Whether the data source must be reset before iterating.
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    calc_weight_count : bool, default False
        Whether to compute and log the trainable parameter count.
    extended_log : bool, default False
        Whether to log the metric with full precision as well.
    """
    # NOTE(review): RMSE is used here although training tracks F1 --
    # confirm this is the intended evaluation metric for KHPA.
    rmse_calc = mx.metric.RMSE()
    tic = time.time()
    rmse_val_value = validate(
        metric_calc=rmse_calc,
        net=net,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=data_source_needs_reset,
        dtype=dtype,
        ctx=ctx)
    if calc_weight_count:
        weight_count = calc_net_weight_count(net)
        logging.info('Model: {} trainable parameters'.format(weight_count))
    if extended_log:
        logging.info('Test: rmse={rmse:.4f} ({rmse})'.format(
            rmse=rmse_val_value))
    else:
        logging.info('Test: rmse={rmse:.4f}'.format(
            rmse=rmse_val_value))
    logging.info('Time cost: {:.4f} sec'.format(
        time.time() - tic))
def main():
    """
    Script entry point: parse arguments, build context/model/data source and
    run the evaluation.
    """
    args = parse_args()
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        tune_layers="",
        classes=args.num_classes,
        in_channels=args.in_channels,
        ctx=ctx)
    # Prefer the network's own declared input size when available.
    input_image_size = net.in_size if hasattr(net, 'in_size') else (args.input_size, args.input_size)
    # NOTE(review): `args.resize_inv_factor` and `args.use_rec` are not added
    # by `parse_args` above -- presumably `add_dataset_parser_arguments`
    # defines them; verify, otherwise this raises AttributeError.
    val_data = get_val_data_source(
        dataset_args=args,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    batch_fn = get_batch_fn()
    # Evaluation needs some weights: either pretrained or a checkpoint.
    assert (args.use_pretrained or args.resume.strip())
    test(
        net=net,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=args.use_rec,
        dtype=args.dtype,
        ctx=ctx,
        # calc_weight_count=(not log_file_exist),
        calc_weight_count=True,
        extended_log=True)
if __name__ == '__main__':
main()
| 4,686 | 27.23494 | 101 | py |
imgclsmob | imgclsmob-master/other/gluon/khpa/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/other/gluon/khpa/train_gl_khpa.py | import argparse
import time
import logging
import os
import numpy as np
import random
import mxnet as mx
from mxnet import gluon
from mxnet import autograd as ag
from common.logger_utils import initialize_logging
from common.train_log_param_saver import TrainLogParamSaver
from gluon.lr_scheduler import LRScheduler
from gluon.utils import prepare_mx_context, prepare_model
from other.gluon.khpa.khpa_utils import add_dataset_parser_arguments
from other.gluon.khpa.khpa_utils import get_batch_fn
from other.gluon.khpa.khpa_utils import get_train_data_source
from other.gluon.khpa.khpa_utils import get_val_data_source
from other.gluon.khpa.khpa_utils import validate
def parse_args():
    """
    Parse command-line arguments for KHPA model training.

    Returns:
    -------
    argparse.Namespace
        Parsed arguments.
    """
    parser = argparse.ArgumentParser(
        description='Train a model for image classification (Gluon/KHPA)',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    add_dataset_parser_arguments(parser)

    # Model / checkpointing.
    parser.add_argument('--model', type=str, required=True,
                        help='type of model to use. see model_provider for options.')
    parser.add_argument('--use-pretrained', action='store_true',
                        help='enable using pretrained model from gluon.')
    parser.add_argument('--dtype', type=str, default='float32',
                        help='data type for training')
    parser.add_argument('--resume', type=str, default='',
                        help='resume from previously saved parameters if not None')
    parser.add_argument('--resume-state', type=str, default='',
                        help='resume from previously saved optimizer state if not None')

    # Hardware / data loading.
    parser.add_argument('--num-gpus', type=int, default=0,
                        help='number of gpus to use.')
    parser.add_argument('-j', '--num-data-workers', dest='num_workers', default=4, type=int,
                        help='number of preprocessing workers')
    parser.add_argument('--batch-size', type=int, default=512,
                        help='training batch size per device (CPU/GPU).')
    parser.add_argument('--batch-size-scale', type=int, default=1,
                        help='manual batch-size increasing factor.')

    # Schedule.
    parser.add_argument('--num-epochs', type=int, default=120,
                        help='number of training epochs.')
    parser.add_argument('--start-epoch', type=int, default=1,
                        help='starting epoch for resuming, default is 1 for new training')
    parser.add_argument('--attempt', type=int, default=1,
                        help='current number of training')

    # Optimizer / learning rate.
    parser.add_argument('--optimizer-name', type=str, default='nag',
                        help='optimizer name')
    parser.add_argument('--lr', type=float, default=0.1,
                        help='learning rate')
    parser.add_argument('--lr-mode', type=str, default='cosine',
                        help='learning rate scheduler mode. options are step, poly and cosine')
    parser.add_argument('--lr-decay', type=float, default=0.1,
                        help='decay rate of learning rate')
    parser.add_argument('--lr-decay-period', type=int, default=0,
                        help='interval for periodic learning rate decays. default is 0 to disable.')
    parser.add_argument('--lr-decay-epoch', type=str, default='40,60',
                        help='epoches at which learning rate decays')
    parser.add_argument('--target-lr', type=float, default=1e-8,
                        help='ending learning rate')
    parser.add_argument('--poly-power', type=float, default=2,
                        help='power value for poly LR scheduler')
    parser.add_argument('--warmup-epochs', type=int, default=0,
                        help='number of warmup epochs.')
    parser.add_argument('--warmup-lr', type=float, default=1e-8,
                        help='starting warmup learning rate')
    parser.add_argument('--warmup-mode', type=str, default='linear',
                        help='learning rate scheduler warmup mode. options are linear, poly and constant')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='momentum value for optimizer')
    parser.add_argument('--wd', type=float, default=0.0001,
                        help='weight decay rate')
    parser.add_argument('--gamma-wd-mult', type=float, default=1.0,
                        help='weight decay multiplier for batchnorm gamma')
    parser.add_argument('--beta-wd-mult', type=float, default=1.0,
                        help='weight decay multiplier for batchnorm beta')
    parser.add_argument('--bias-wd-mult', type=float, default=1.0,
                        help='weight decay multiplier for bias')
    parser.add_argument('--grad-clip', type=float, default=None,
                        help='max_norm for gradient clipping')

    # Regularization tricks.
    parser.add_argument('--label-smoothing', action='store_true',
                        help='use label smoothing')
    parser.add_argument('--mixup', action='store_true',
                        help='use mixup strategy')
    parser.add_argument('--mixup-epoch-tail', type=int, default=20,
                        help='number of epochs without mixup at the end of training')

    # Logging / saving.
    parser.add_argument('--log-interval', type=int, default=50,
                        help='number of batches to wait before logging.')
    parser.add_argument('--save-interval', type=int, default=4,
                        help='saving parameters epoch interval, best model will always be saved')
    parser.add_argument('--save-dir', type=str, default='',
                        help='directory of saved models and log-files')
    parser.add_argument('--logging-file-name', type=str, default='train.log',
                        help='filename of training log')
    parser.add_argument('--seed', type=int, default=-1,
                        help='Random seed to be fixed')
    parser.add_argument('--log-packages', type=str, default='mxnet',
                        help='list of python packages for logging')
    parser.add_argument('--log-pip-packages', type=str, default='mxnet-cu92',
                        help='list of pip packages for logging')
    parser.add_argument('--tune-layers', type=str, default='',
                        help='Regexp for selecting layers for fine tuning')
    return parser.parse_args()
def init_rand(seed):
    """
    Seed the Python, NumPy and MXNet RNGs; draw a random seed when a
    non-positive one is given.

    Parameters:
    ----------
    seed : int
        Requested seed; values <= 0 mean 'pick one at random'.

    Returns:
    -------
    int
        The seed that was actually applied.
    """
    if seed <= 0:
        seed = np.random.randint(10000)
    for seed_fn in (random.seed, np.random.seed, mx.random.seed):
        seed_fn(seed)
    return seed
def prepare_trainer(net,
                    optimizer_name,
                    wd,
                    momentum,
                    lr_mode,
                    lr,
                    lr_decay_period,
                    lr_decay_epoch,
                    lr_decay,
                    target_lr,
                    poly_power,
                    warmup_epochs,
                    warmup_lr,
                    warmup_mode,
                    batch_size,
                    num_epochs,
                    num_training_samples,
                    dtype,
                    gamma_wd_mult=1.0,
                    beta_wd_mult=1.0,
                    bias_wd_mult=1.0,
                    state_file_path=None):
    """
    Build the Gluon trainer and LR scheduler, optionally restoring a saved
    optimizer state.

    Returns:
    -------
    tuple of (gluon.Trainer, LRScheduler)
        The trainer and the scheduler it uses.
    """
    # Optional per-parameter-type weight-decay multipliers (selected by
    # parameter-name regexp).
    if gamma_wd_mult != 1.0:
        for k, v in net.collect_params('.*gamma').items():
            v.wd_mult = gamma_wd_mult
    if beta_wd_mult != 1.0:
        for k, v in net.collect_params('.*beta').items():
            v.wd_mult = beta_wd_mult
    if bias_wd_mult != 1.0:
        for k, v in net.collect_params('.*bias').items():
            v.wd_mult = bias_wd_mult
    # Either a fixed decay period or an explicit comma-separated epoch list.
    if lr_decay_period > 0:
        lr_decay_epoch = list(range(lr_decay_period, num_epochs, lr_decay_period))
    else:
        lr_decay_epoch = [int(i) for i in lr_decay_epoch.split(',')]
    num_batches = num_training_samples // batch_size
    lr_scheduler = LRScheduler(
        mode=lr_mode,
        base_lr=lr,
        n_iters=num_batches,
        n_epochs=num_epochs,
        step=lr_decay_epoch,
        step_factor=lr_decay,
        target_lr=target_lr,
        power=poly_power,
        warmup_epochs=warmup_epochs,
        warmup_lr=warmup_lr,
        warmup_mode=warmup_mode)
    optimizer_params = {'learning_rate': lr,
                        'wd': wd,
                        'momentum': momentum,
                        'lr_scheduler': lr_scheduler}
    if dtype != 'float32':
        optimizer_params['multi_precision'] = True
    trainer = gluon.Trainer(
        params=net.collect_params(),
        optimizer=optimizer_name,
        optimizer_params=optimizer_params)
    if (state_file_path is not None) and state_file_path and os.path.exists(state_file_path):
        logging.info('Loading trainer states: {}'.format(state_file_path))
        trainer.load_states(state_file_path)
        # Loaded states may carry a stale weight decay; enforce the requested one.
        if trainer._optimizer.wd != wd:
            trainer._optimizer.wd = wd
            logging.info('Reset the weight decay: {}'.format(wd))
        # lr_scheduler = trainer._optimizer.lr_scheduler
        # Always use the freshly built scheduler, not the restored one.
        trainer._optimizer.lr_scheduler = lr_scheduler
    return trainer, lr_scheduler
def save_params(file_stem, net, trainer):
    """
    Persist network weights ('<stem>.params') and optimizer state
    ('<stem>.states') side by side.

    Parameters:
    ----------
    file_stem : str
        Path prefix for both files.
    net : HybridBlock
        Network whose parameters are saved.
    trainer : gluon.Trainer
        Trainer whose optimizer states are saved.
    """
    net.save_parameters("{}.params".format(file_stem))
    trainer.save_states("{}.states".format(file_stem))
def train_epoch(epoch,
                net,
                metric_calc,
                train_data,
                batch_fn,
                data_source_needs_reset,
                dtype,
                ctx,
                loss_func,
                trainer,
                lr_scheduler,
                batch_size,
                log_interval,
                grad_clip_value,
                batch_size_scale):
    """
    Run one training epoch with optional gradient accumulation
    (`batch_size_scale` > 1) and gradient clipping.

    Returns:
    -------
    tuple of (str, float, float)
        Metric name, metric value and mean training loss for the epoch.
    """
    batch_size_extend_count = 0
    tic = time.time()
    if data_source_needs_reset:
        train_data.reset()
    metric_calc.reset()
    train_loss = 0.0
    btic = time.time()
    for i, batch in enumerate(train_data):
        data_list, labels_list = batch_fn(batch, ctx)
        # Each multi-label target is encoded as per-class binary pairs.
        onehot_labels_list = [Y.one_hot(depth=2) for Y in labels_list]
        with ag.record():
            # Network output is reshaped to (batch, num_labels, 2) to match.
            onehot_outputs_list = [net(X.astype(dtype, copy=False)).reshape(0, -1, 2) for X in data_list]
            loss_list = [loss_func(yhat, y.astype(dtype, copy=False)) for yhat, y in
                         zip(onehot_outputs_list, onehot_labels_list)]
        for loss in loss_list:
            loss.backward()
        lr_scheduler.update(i, epoch)
        if grad_clip_value is not None:
            grads = [v.grad(ctx[0]) for v in net.collect_params().values() if v._grad is not None]
            gluon.utils.clip_global_norm(grads, max_norm=grad_clip_value)
        if batch_size_scale == 1:
            trainer.step(batch_size)
        else:
            # Gradient accumulation: step only every `batch_size_scale`
            # batches, then clear the accumulated gradients manually.
            if (i + 1) % batch_size_scale == 0:
                batch_size_extend_count = 0
                trainer.step(batch_size * batch_size_scale)
                for p in net.collect_params().values():
                    p.zero_grad()
            else:
                batch_size_extend_count += 1
        train_loss += sum([loss.mean().asscalar() for loss in loss_list]) / len(loss_list)
        # Flatten per-label predictions/targets for the metric.
        labels_list_ = [Y.reshape(-1,) for Y in labels_list]
        onehot_outputs_list_ = [Y.reshape(-1, 2) for Y in onehot_outputs_list]
        metric_calc.update(
            src_pts=labels_list_,
            dst_pts=onehot_outputs_list_)
        if log_interval and not (i + 1) % log_interval:
            speed = batch_size * log_interval / (time.time() - btic)
            btic = time.time()
            metric_name, metric_value = metric_calc.get()
            logging.info('Epoch[{}] Batch [{}]\tSpeed: {:.2f} samples/sec\t{}={:.4f}\tlr={:.5f}'.format(
                epoch + 1, i, speed, metric_name, metric_value, trainer.learning_rate))
    # Flush any remaining accumulated gradients at epoch end.
    if (batch_size_scale != 1) and (batch_size_extend_count > 0):
        trainer.step(batch_size * batch_size_extend_count)
        for p in net.collect_params().values():
            p.zero_grad()
    throughput = int(batch_size * (i + 1) / (time.time() - tic))
    logging.info('[Epoch {}] speed: {:.2f} samples/sec\ttime cost: {:.2f} sec'.format(
        epoch + 1, throughput, time.time() - tic))
    train_loss /= (i + 1)
    metric_name, metric_value = metric_calc.get()
    logging.info('[Epoch {}] training: {}={:.4f}\tloss={:.4f}'.format(
        epoch + 1, metric_name, metric_value, train_loss))
    return metric_name, metric_value, train_loss
def train_net(batch_size,
              num_epochs,
              start_epoch1,
              train_data,
              val_data,
              batch_fn,
              data_source_needs_reset,
              dtype,
              net,
              trainer,
              lr_scheduler,
              lp_saver,
              log_interval,
              grad_clip_value,
              batch_size_scale,
              ctx):
    """
    Full training loop: per-epoch training, validation (F1), and optional
    checkpoint/score logging via `lp_saver`.
    """
    if batch_size_scale != 1:
        # Gradient accumulation needs gradients to be summed across batches.
        for p in net.collect_params().values():
            p.grad_req = 'add'
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    val_metric_calc = mx.metric.F1()
    train_metric_calc = mx.metric.F1()
    loss_func = gluon.loss.SigmoidBinaryCrossEntropyLoss()
    assert (type(start_epoch1) == int)
    assert (start_epoch1 >= 1)
    if start_epoch1 > 1:
        # Resuming: report validation quality of the restored weights first.
        logging.info('Start training from [Epoch {}]'.format(start_epoch1))
        val_metric_name_value = validate(
            metric_calc=val_metric_calc,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        logging.info('[Epoch {}] validation: {}={:.4f}'.format(
            start_epoch1 - 1, val_metric_name_value[0], val_metric_name_value[1]))
    gtic = time.time()
    for epoch in range(start_epoch1 - 1, num_epochs):
        train_metric_name, train_metric_value, train_loss = train_epoch(
            epoch=epoch,
            net=net,
            metric_calc=train_metric_calc,
            train_data=train_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx,
            loss_func=loss_func,
            trainer=trainer,
            lr_scheduler=lr_scheduler,
            batch_size=batch_size,
            log_interval=log_interval,
            grad_clip_value=grad_clip_value,
            batch_size_scale=batch_size_scale)
        val_metric_name, val_metric_value = validate(
            metric_calc=val_metric_calc,
            net=net,
            val_data=val_data,
            batch_fn=batch_fn,
            data_source_needs_reset=data_source_needs_reset,
            dtype=dtype,
            ctx=ctx)
        logging.info('[Epoch {}] validation: {}={:.4f}'.format(
            epoch + 1, val_metric_name, val_metric_value))
        if lp_saver is not None:
            lp_saver_kwargs = {'net': net, 'trainer': trainer}
            # Metrics are negated so the saver can minimize them uniformly.
            val_metric_value_dec = -val_metric_value
            train_metric_value_dec = -train_metric_value
            lp_saver.epoch_test_end_callback(
                epoch1=(epoch + 1),
                params=[val_metric_value_dec, train_metric_value_dec, train_loss, trainer.learning_rate],
                **lp_saver_kwargs)
    logging.info('Total time cost: {:.2f} sec'.format(time.time() - gtic))
    if lp_saver is not None:
        # NOTE(review): message says 'err-top5' but the tracked value here is
        # the negated validation F1 -- looks copy-pasted from the ImageNet
        # training script; the label should be corrected.
        logging.info('Best err-top5: {:.4f} at {} epoch'.format(
            lp_saver.best_eval_metric_value, lp_saver.best_eval_metric_epoch))
def main():
    """
    Script entry point: parse arguments, seed RNGs, build the model, data
    sources and trainer, then run the training loop.
    """
    args = parse_args()
    args.seed = init_rand(seed=args.seed)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ctx, batch_size = prepare_mx_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        dtype=args.dtype,
        tune_layers=args.tune_layers,
        classes=args.num_classes,
        in_channels=args.in_channels,
        ctx=ctx)
    assert (hasattr(net, 'classes'))
    assert (hasattr(net, 'in_size'))
    # num_classes = net.classes if hasattr(net, 'classes') else 1000
    input_image_size = net.in_size if hasattr(net, 'in_size') else (args.input_size, args.input_size)
    train_data = get_train_data_source(
        dataset_args=args,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size)
    # NOTE(review): `args.resize_inv_factor` is not defined by `parse_args`
    # above -- presumably `add_dataset_parser_arguments` supplies it; verify,
    # otherwise this raises AttributeError.
    val_data = get_val_data_source(
        dataset_args=args,
        batch_size=batch_size,
        num_workers=args.num_workers,
        input_image_size=input_image_size,
        resize_inv_factor=args.resize_inv_factor)
    batch_fn = get_batch_fn()
    num_training_samples = len(train_data._dataset)
    data_source_needs_reset = False
    trainer, lr_scheduler = prepare_trainer(
        net=net,
        optimizer_name=args.optimizer_name,
        wd=args.wd,
        momentum=args.momentum,
        lr_mode=args.lr_mode,
        lr=args.lr,
        lr_decay_period=args.lr_decay_period,
        lr_decay_epoch=args.lr_decay_epoch,
        lr_decay=args.lr_decay,
        target_lr=args.target_lr,
        poly_power=args.poly_power,
        warmup_epochs=args.warmup_epochs,
        warmup_lr=args.warmup_lr,
        warmup_mode=args.warmup_mode,
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        num_training_samples=num_training_samples,
        dtype=args.dtype,
        gamma_wd_mult=args.gamma_wd_mult,
        beta_wd_mult=args.beta_wd_mult,
        bias_wd_mult=args.bias_wd_mult,
        state_file_path=args.resume_state)
    if args.save_dir and args.save_interval:
        metric_type = "F1"
        # NOTE(review): the 'imagenet_' checkpoint prefix is misleading for a
        # KHPA training script -- presumably copy-pasted; confirm before use.
        lp_saver = TrainLogParamSaver(
            checkpoint_file_name_prefix='imagenet_{}'.format(args.model),
            last_checkpoint_file_name_suffix="last",
            best_checkpoint_file_name_suffix=None,
            last_checkpoint_dir_path=args.save_dir,
            best_checkpoint_dir_path=None,
            last_checkpoint_file_count=2,
            best_checkpoint_file_count=2,
            checkpoint_file_save_callback=save_params,
            checkpoint_file_exts=('.params', '.states'),
            save_interval=args.save_interval,
            num_epochs=args.num_epochs,
            param_names=['Val.' + metric_type, 'Train.' + metric_type, 'Train.Loss', 'LR'],
            acc_ind=0,
            # bigger=[True],
            # mask=None,
            score_log_file_path=os.path.join(args.save_dir, 'score.log'),
            score_log_attempt_value=args.attempt,
            best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))
    else:
        lp_saver = None
    train_net(
        batch_size=batch_size,
        num_epochs=args.num_epochs,
        start_epoch1=args.start_epoch,
        train_data=train_data,
        val_data=val_data,
        batch_fn=batch_fn,
        data_source_needs_reset=data_source_needs_reset,
        dtype=args.dtype,
        net=net,
        trainer=trainer,
        lr_scheduler=lr_scheduler,
        lp_saver=lp_saver,
        log_interval=args.log_interval,
        grad_clip_value=args.grad_clip,
        batch_size_scale=args.batch_size_scale,
        ctx=ctx)
if __name__ == '__main__':
main()
| 19,879 | 31.012882 | 105 | py |
imgclsmob | imgclsmob-master/other/pytorch/imagenet1k1.py | import math
import os
import cv2
import numpy as np
from PIL import Image
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
__all__ = ['add_dataset_parser_arguments', 'get_train_data_loader', 'get_val_data_loader']
def add_dataset_parser_arguments(parser):
    """
    Register ImageNet-1K dataset command-line options on the given parser.

    Parameters:
    ----------
    parser : argparse.ArgumentParser
        Parser to extend (modified in place).
    """
    parser.add_argument('--data-dir', type=str, default='../imgclsmob_data/imagenet',
                        help='path to directory with ImageNet-1K dataset')
    parser.add_argument('--input-size', type=int, default=224,
                        help='size of the input for model')
    parser.add_argument('--resize-inv-factor', type=float, default=0.875,
                        help='inverted ratio for input image crop')
    parser.add_argument('--num-classes', type=int, default=1000,
                        help='number of classes')
    parser.add_argument('--in-channels', type=int, default=3,
                        help='number of input channels')
    parser.add_argument('--use-cv-resize', action='store_true',
                        help='use OpenCV resize preprocessing')
def cv_loader(path):
    """
    Load an image file with OpenCV and return it as an RGB ndarray.

    Parameters:
    ----------
    path : str
        Path to the image file.

    Returns:
    -------
    np.ndarray
        HWC image in RGB channel order.
    """
    bgr = cv2.imread(path, flags=1)
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
class CvResize(object):
    """
    Resize the input PIL Image to the given size via OpenCV.

    Parameters:
    ----------
    size : int or tuple of (W, H)
        Size of output image. An int means 'shorter side', preserving aspect.
    interpolation : int, default PIL.Image.BILINEAR
        PIL interpolation flag; mapped to the equivalent OpenCV flag.
    """
    # PIL interpolation flag -> OpenCV interpolation flag.
    _INTERP_MAP = {
        Image.NEAREST: cv2.INTER_NEAREST,
        Image.BILINEAR: cv2.INTER_LINEAR,
        Image.BICUBIC: cv2.INTER_CUBIC,
        Image.LANCZOS: cv2.INTER_LANCZOS4,
    }

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """
        Resize one image.

        Parameters:
        ----------
        img : PIL.Image
            Input image.

        Returns:
        -------
        PIL.Image
            Resized image (the input itself when no resizing is needed).
        """
        if self.interpolation not in self._INTERP_MAP:
            raise ValueError()
        cv_interpolation = self._INTERP_MAP[self.interpolation]
        cv_img = np.array(img)
        if not isinstance(self.size, int):
            # Explicit (W, H) target size.
            resized = cv2.resize(cv_img, dsize=self.size, interpolation=cv_interpolation)
            return Image.fromarray(resized)
        w, h = img.size
        # Shorter side already matches: nothing to do.
        if (w <= h and w == self.size) or (h <= w and h == self.size):
            return img
        if w < h:
            out_size = (self.size, int(self.size * h / w))
        else:
            out_size = (int(self.size * w / h), self.size)
        resized = cv2.resize(cv_img, dsize=out_size, interpolation=cv_interpolation)
        return Image.fromarray(resized)
def get_train_data_loader(data_dir,
                          batch_size,
                          num_workers,
                          input_image_size=224):
    """
    Build the ImageNet-1K training data loader (random crop/flip/jitter,
    standard ImageNet normalization).

    Parameters:
    ----------
    data_dir : str
        Dataset root directory (expects a 'train' sub-directory).
    batch_size : int
        Batch size.
    num_workers : int
        Number of preprocessing worker processes.
    input_image_size : int, default 224
        Spatial size of the crop fed to the model.

    Returns:
    -------
    torch.utils.data.DataLoader
        Shuffled training loader.
    """
    jitter = 0.4
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter,
            contrast=jitter,
            saturation=jitter),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))])
    train_dataset = datasets.ImageFolder(
        root=os.path.join(data_dir, 'train'),
        transform=train_transform)
    return torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True)
def get_val_data_loader(data_dir,
                        batch_size,
                        num_workers,
                        input_image_size=224,
                        resize_inv_factor=0.875,
                        use_cv_resize=False):
    """
    Build the ImageNet-1K validation data loader (resize + center crop,
    standard ImageNet normalization).

    Parameters:
    ----------
    data_dir : str
        Dataset root directory (expects a 'val' sub-directory).
    batch_size : int
        Batch size.
    num_workers : int
        Number of preprocessing worker processes.
    input_image_size : int, default 224
        Spatial size of the center crop fed to the model.
    resize_inv_factor : float, default 0.875
        Inverted crop ratio: resize target = ceil(input_size / factor).
    use_cv_resize : bool, default False
        Use the OpenCV-backed resize instead of torchvision's.

    Returns:
    -------
    torch.utils.data.DataLoader
        Non-shuffled validation loader.
    """
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    resize_op = CvResize(resize_value) if use_cv_resize else transforms.Resize(resize_value)
    val_transform = transforms.Compose([
        resize_op,
        transforms.CenterCrop(input_image_size),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))])
    val_dataset = datasets.ImageFolder(
        root=os.path.join(data_dir, 'val'),
        transform=val_transform)
    return torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
| 5,152 | 27.469613 | 90 | py |
imgclsmob | imgclsmob-master/other/pytorch/cub200_2011_utils1.py | """
CUB-200-2011 fine-grained classification dataset routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_train_data_loader', 'get_val_data_loader']
import math
import torch.utils.data
import torchvision.transforms as transforms
from pytorch.datasets.cub200_2011_cls_dataset import CUB200_2011
def add_dataset_parser_arguments(parser):
    """
    Register CUB-200-2011 dataset command-line options on the given parser.

    Parameters:
    ----------
    parser : argparse.ArgumentParser
        Parser to extend (modified in place).
    """
    parser.add_argument('--data-dir', type=str, default='../imgclsmob_data/CUB_200_2011',
                        help='path to directory with CUB-200-2011 dataset')
    parser.add_argument('--input-size', type=int, default=448,
                        help='size of the input for model')
    parser.add_argument('--resize-inv-factor', type=float, default=0.74667,
                        help='inverted ratio for input image crop')
    parser.add_argument('--num-classes', type=int, default=200,
                        help='number of classes')
    parser.add_argument('--in-channels', type=int, default=3,
                        help='number of input channels')
def get_train_data_loader(dataset_dir,
                          batch_size,
                          num_workers,
                          input_image_size=448):
    """
    Build the CUB-200-2011 training data loader (random crop/flip/jitter,
    standard ImageNet normalization).

    Parameters:
    ----------
    dataset_dir : str
        Dataset root directory.
    batch_size : int
        Batch size.
    num_workers : int
        Number of preprocessing worker processes.
    input_image_size : int, default 448
        Spatial size of the crop fed to the model.

    Returns:
    -------
    torch.utils.data.DataLoader
        Shuffled training loader.
    """
    jitter = 0.4
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter,
            contrast=jitter,
            saturation=jitter),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))])
    train_dataset = CUB200_2011(
        root=dataset_dir,
        train=True,
        transform=train_transform)
    return torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True)
def get_val_data_loader(dataset_dir,
                        batch_size,
                        num_workers,
                        input_image_size=448,
                        resize_inv_factor=0.74667):
    """
    Build the CUB-200-2011 validation data loader (resize + center crop,
    standard ImageNet normalization).

    Parameters:
    ----------
    dataset_dir : str
        Dataset root directory.
    batch_size : int
        Batch size.
    num_workers : int
        Number of preprocessing worker processes.
    input_image_size : int, default 448
        Spatial size of the center crop fed to the model.
    resize_inv_factor : float, default 0.74667
        Inverted crop ratio: resize target = ceil(input_size / factor).

    Returns:
    -------
    torch.utils.data.DataLoader
        Non-shuffled validation loader.
    """
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    val_transform = transforms.Compose([
        transforms.Resize(resize_value),
        transforms.CenterCrop(input_image_size),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225))])
    val_dataset = CUB200_2011(
        root=dataset_dir,
        train=False,
        transform=val_transform)
    return torch.utils.data.DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
imgclsmob | imgclsmob-master/other/pytorch/cifar1.py | """
CIFAR/SVHN dataset routines.
"""
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
__all__ = ['add_dataset_parser_arguments', 'get_train_data_loader', 'get_val_data_loader']
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Register dataset-specific command line arguments (CIFAR-10/CIFAR-100/SVHN).

    Parameters:
    ----------
    parser : ArgumentParser
        Parser to extend.
    dataset_name : str
        Dataset name; one of 'CIFAR10', 'CIFAR100', 'SVHN'.

    Raises:
    ------
    Exception
        If `dataset_name` is not supported.
    """
    # Per-dataset (default data dir, --data-dir help text, class count).
    presets = {
        "CIFAR10": ('../imgclsmob_data/cifar10', 'path to directory with CIFAR-10 dataset', 10),
        "CIFAR100": ('../imgclsmob_data/cifar100', 'path to directory with CIFAR-100 dataset', 100),
        "SVHN": ('../imgclsmob_data/svhn', 'path to directory with SVHN dataset', 10),
    }
    if dataset_name not in presets:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    data_dir, data_dir_help, num_classes = presets[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=data_dir,
        help=data_dir_help)
    parser.add_argument(
        '--num-classes',
        type=int,
        default=num_classes,
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
def get_train_data_loader(dataset_name,
                          dataset_dir,
                          batch_size,
                          num_workers):
    """
    Create a data loader for the CIFAR-10/CIFAR-100/SVHN training subset.

    Parameters:
    ----------
    dataset_name : str
        Dataset name; one of 'CIFAR10', 'CIFAR100', 'SVHN'.
    dataset_dir : str
        Path to the directory with the dataset (downloaded on demand).
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers for data loading.

    Returns:
    -------
    DataLoader
        Data loader for the training subset.
    """
    jitter = 0.4
    # Standard CIFAR augmentation (pad-crop + flip + color jitter) with
    # CIFAR per-channel normalization statistics.
    train_transform = transforms.Compose([
        transforms.RandomCrop(size=32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(
            brightness=jitter,
            contrast=jitter,
            saturation=jitter),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.4914, 0.4822, 0.4465),
            std=(0.2023, 0.1994, 0.2010)),
    ])
    # SVHN selects its split via the `split` keyword instead of `train`.
    if dataset_name == "CIFAR10":
        dataset_class, split_kwargs = datasets.CIFAR10, {"train": True}
    elif dataset_name == "CIFAR100":
        dataset_class, split_kwargs = datasets.CIFAR100, {"train": True}
    elif dataset_name == "SVHN":
        dataset_class, split_kwargs = datasets.SVHN, {"split": "train"}
    else:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset = dataset_class(
        root=dataset_dir,
        transform=train_transform,
        download=True,
        **split_kwargs)
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        pin_memory=True)
def get_val_data_loader(dataset_name,
                        dataset_dir,
                        batch_size,
                        num_workers):
    """
    Create a data loader for the CIFAR-10/CIFAR-100/SVHN validation subset.

    Parameters:
    ----------
    dataset_name : str
        Dataset name; one of 'CIFAR10', 'CIFAR100', 'SVHN'.
    dataset_dir : str
        Path to the directory with the dataset (downloaded on demand).
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers for data loading.

    Returns:
    -------
    DataLoader
        Data loader for the validation subset.
    """
    # No augmentation at validation time, only normalization.
    val_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.4914, 0.4822, 0.4465),
            std=(0.2023, 0.1994, 0.2010)),
    ])
    # SVHN selects its split via the `split` keyword instead of `train`.
    if dataset_name == "CIFAR10":
        dataset_class, split_kwargs = datasets.CIFAR10, {"train": False}
    elif dataset_name == "CIFAR100":
        dataset_class, split_kwargs = datasets.CIFAR100, {"train": False}
    elif dataset_name == "SVHN":
        dataset_class, split_kwargs = datasets.SVHN, {"split": "test"}
    else:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset = dataset_class(
        root=dataset_dir,
        transform=val_transform,
        download=True,
        **split_kwargs)
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
| 4,409 | 28.205298 | 90 | py |
imgclsmob | imgclsmob-master/other/pytorch/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/other/pytorch/seg_utils.py | """
Segmentation datasets (VOC2012/ADE20K/Cityscapes/COCO) routines.
"""
__all__ = ['add_dataset_parser_arguments', 'get_test_data_loader', 'validate1', 'get_metainfo']
from tqdm import tqdm
import torch.utils.data
import torchvision.transforms as transforms
from pytorch.datasets.voc_seg_dataset import VOCSegDataset
from pytorch.datasets.ade20k_seg_dataset import ADE20KSegDataset
from pytorch.datasets.cityscapes_seg_dataset import CityscapesSegDataset
from pytorch.datasets.coco_seg_dataset import CocoSegDataset
# import torchvision.datasets as datasets
def add_dataset_parser_arguments(parser,
                                 dataset_name):
    """
    Register command line arguments for the supported segmentation datasets.

    Parameters:
    ----------
    parser : ArgumentParser
        Parser to extend.
    dataset_name : str
        Dataset name; one of 'VOC', 'ADE20K', 'Cityscapes', 'COCO'.

    Raises:
    ------
    Exception
        If `dataset_name` is not supported.
    """
    # Per-dataset (default data dir, --data-dir help text, class count).
    presets = {
        "VOC": ('../imgclsmob_data/voc', 'path to directory with Pascal VOC2012 dataset', 21),
        "ADE20K": ('../imgclsmob_data/ade20k', 'path to directory with ADE20K dataset', 150),
        "Cityscapes": ('../imgclsmob_data/cityscapes', 'path to directory with Cityscapes dataset', 19),
        "COCO": ('../imgclsmob_data/coco', 'path to directory with COCO dataset', 21),
    }
    if dataset_name not in presets:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    data_dir, data_dir_help, num_classes = presets[dataset_name]
    parser.add_argument(
        '--data-dir',
        type=str,
        default=data_dir,
        help=data_dir_help)
    parser.add_argument(
        '--num-classes',
        type=int,
        default=num_classes,
        help='number of classes')
    parser.add_argument(
        '--in-channels',
        type=int,
        default=3,
        help='number of input channels')
    parser.add_argument(
        '--image-base-size',
        type=int,
        default=520,
        help='base image size')
    parser.add_argument(
        '--image-crop-size',
        type=int,
        default=480,
        help='crop image size')
def get_metainfo(dataset_name):
    """
    Return the label meta-information for a segmentation dataset.

    Parameters:
    ----------
    dataset_name : str
        Dataset name; one of 'VOC', 'ADE20K', 'Cityscapes', 'COCO'.

    Returns:
    -------
    dict
        Keys: 'vague_idx', 'use_vague', 'background_idx', 'ignore_bg' taken
        from the corresponding dataset class attributes.

    Raises:
    ------
    Exception
        If `dataset_name` is not supported.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    dataset_class = dataset_classes[dataset_name]
    return {
        "vague_idx": dataset_class.vague_idx,
        "use_vague": dataset_class.use_vague,
        "background_idx": dataset_class.background_idx,
        "ignore_bg": dataset_class.ignore_bg}
def get_test_data_loader(dataset_name,
                         dataset_dir,
                         batch_size,
                         num_workers):
    """
    Create a data loader for the test subset of a segmentation dataset.

    Parameters:
    ----------
    dataset_name : str
        Dataset name; one of 'VOC', 'ADE20K', 'Cityscapes', 'COCO'.
    dataset_dir : str
        Path to the directory with the dataset.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers for data loading.

    Returns:
    -------
    DataLoader
        Data loader for the test subset.

    Raises:
    ------
    Exception
        If `dataset_name` is not supported.
    """
    dataset_classes = {
        "VOC": VOCSegDataset,
        "ADE20K": ADE20KSegDataset,
        "Cityscapes": CityscapesSegDataset,
        "COCO": CocoSegDataset,
    }
    if dataset_name not in dataset_classes:
        raise Exception('Unrecognized dataset: {}'.format(dataset_name))
    # ImageNet-style normalization only; no augmentation for testing.
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(
            mean=(0.485, 0.456, 0.406),
            std=(0.229, 0.224, 0.225)),
    ])
    dataset = dataset_classes[dataset_name](
        root=dataset_dir,
        mode="test",
        transform=test_transform)
    return torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=True)
def validate1(accuracy_metrics,
              net,
              val_data,
              use_cuda):
    """
    Run one evaluation pass over `val_data` and return the value of each metric.

    Parameters:
    ----------
    accuracy_metrics : iterable
        Metric objects exposing `reset()`, `update(target, output)` and `get()`.
    net : Module
        Model to evaluate; switched to eval mode for the pass.
    val_data : iterable
        Iterable of (data, target) batches.
    use_cuda : bool
        Whether to move target tensors to the GPU.

    Returns:
    -------
    list
        Result of `metric.get()` for every metric, in input order.
    """
    net.eval()
    for metric in accuracy_metrics:
        metric.reset()
    # Gradients are not needed for evaluation.
    with torch.no_grad():
        for data, target in tqdm(val_data):
            if use_cuda:
                # NOTE(review): only `target` is moved to the GPU here; the input
                # batch stays where the loader produced it — confirm that device
                # placement of `data` is handled by the model or the loader.
                target = target.cuda(non_blocking=True)
            output = net(data)
            for metric in accuracy_metrics:
                metric.update(target, output)
    accuracy_info = [metric.get() for metric in accuracy_metrics]
    return accuracy_info
| 5,401 | 31.347305 | 95 | py |
imgclsmob | imgclsmob-master/tensorflow_/setup.py | from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='tensorflowcv',
version='0.0.38',
description='Image classification models for TensorFlow',
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/osmr/imgclsmob',
author='Oleg Sémery',
author_email='osemery@gmail.com',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Image Recognition',
],
keywords='machine-learning deep-learning neuralnetwork image-classification tensorflow imagenet vgg resnet resnext '
'senet densenet darknet squeezenet squeezenext shufflenet menet mobilenent igcv3 mnasnet',
packages=find_packages(exclude=['others', '*.others', 'others.*', '*.others.*']),
include_package_data=True,
install_requires=['numpy', 'requests'],
)
| 1,267 | 37.424242 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/utils.py | import numpy as np
import tensorflow as tf
from .tensorflowcv.model_provider import get_model
from .tensorflowcv.models.common import is_channels_first
def save_model_params(sess,
                      file_path):
    """
    Dump the values of all global variables in the default graph into a
    compressed NumPy `.npz` archive keyed by variable name.

    Parameters:
    ----------
    sess : Session
        Session used to evaluate the variables.
    file_path : str
        Destination file path (conventionally ending in '.npz').
    """
    params = {}
    for variable in tf.global_variables():
        params[variable.name] = variable.eval(sess)
    np.savez_compressed(file_path, **params)
def load_model_params(net,
                      param_dict,
                      sess,
                      ignore_missing=False):
    """
    Assign values from a parameter dictionary to the graph variables with
    matching names.

    Parameters:
    ----------
    net : object
        Model object (unused here; kept for interface compatibility).
    param_dict : dict
        Mapping from variable name to value (e.g. the result of `np.load` on an
        archive written by `save_model_params`).
    sess : Session
        Session used to run the assignment ops.
    ignore_missing : bool, default False
        Whether to silently skip parameters that have no matching variable.

    Raises:
    ------
    ValueError
        If a parameter has no matching variable and `ignore_missing` is False.
    """
    # Bug fix: iterate key/value pairs. Iterating the mapping directly yields
    # only the keys, so the tuple unpacking below raised a ValueError.
    for param_name, param_data in param_dict.items():
        with tf.variable_scope(param_name, reuse=True):
            try:
                var = tf.get_variable(param_name)
                sess.run(var.assign(param_data))
            except ValueError:
                if not ignore_missing:
                    raise
def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path):
    """
    Create a model graph (channels-first) and optionally initialize its
    variables from pretrained weights.

    Parameters:
    ----------
    model_name : str
        Name of the model (looked up via `get_model`).
    use_pretrained : bool
        Whether to load the model's bundled pretrained state.
    pretrained_model_file_path : str
        Explicit path to a weight file; takes precedence over the bundled state
        when non-empty.

    Returns:
    -------
    Tensor
        Output tensor of the network applied to the input placeholder 'xx'.
    """
    data_format = "channels_first"
    kwargs = {"pretrained": use_pretrained, "data_format": data_format}
    net = get_model(model_name, **kwargs)
    # Fall back to the standard 224x224 input when the model does not declare one.
    input_image_size = net.in_size[0] if hasattr(net, 'in_size') else 224
    x_shape = (None, 3, input_image_size, input_image_size) if is_channels_first(data_format) else\
        (None, input_image_size, input_image_size, 3)
    x = tf.placeholder(
        dtype=tf.float32,
        shape=x_shape,
        name='xx')
    y_net = net(x)
    if use_pretrained or pretrained_model_file_path:
        from .tensorflowcv.model_provider import init_variables_from_state_dict
        with tf.Session() as sess:
            from .tensorflowcv.model_provider import load_state_dict
            if pretrained_model_file_path:
                # Explicit weight file beats the bundled pretrained state.
                init_variables_from_state_dict(
                    sess=sess,
                    state_dict=load_state_dict(file_path=pretrained_model_file_path))
            else:
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
    return y_net
| 1,997 | 33.448276 | 99 | py |
imgclsmob | imgclsmob-master/tensorflow_/utils_tp.py | import math
import logging
import os
import multiprocessing
import numpy as np
import cv2
import tensorflow as tf
from tensorpack.models import regularize_cost
from tensorpack.tfutils.summary import add_moving_summary
# from tensorpack.tfutils.summary import add_tensor_summary
from tensorpack import ModelDesc, get_current_tower_context
from tensorpack import InputDesc, PlaceholderInput, TowerContext
from tensorpack.tfutils import get_model_loader, model_utils
# from tensorpack.tfutils import get_default_sess_config
from tensorpack.dataflow import imgaug, dataset, AugmentImageComponent, PrefetchDataZMQ, BatchData
# from tensorpack.dataflow import PrefetchData
from tensorpack.dataflow import MultiThreadMapData
# from tensorpack.dataflow import MapData
from tensorpack.utils import logger
from .tensorflowcv.model_provider import get_model
from .tensorflowcv.models.common import is_channels_first
class CachedChiefSessionCreator(tf.train.ChiefSessionCreator):
    """
    Chief session creator that memoizes the created session: the first call to
    `create_session` builds one via the parent class, every later call returns
    the same cached session object.
    """
    def __init__(self,
                 scaffold=None,
                 master="",
                 config=None,
                 checkpoint_dir=None,
                 checkpoint_filename_with_path=None):
        super(CachedChiefSessionCreator, self).__init__(
            scaffold=scaffold,
            master=master,
            config=config,
            checkpoint_dir=checkpoint_dir,
            checkpoint_filename_with_path=checkpoint_filename_with_path)
        # Session cache; populated lazily on the first create_session() call.
        self.cached_sess = None
    def create_session(self):
        # Build the session only once, then keep returning the cached instance.
        if self.cached_sess is None:
            self.cached_sess = super(CachedChiefSessionCreator, self).create_session()
        return self.cached_sess
class ImageNetModel(ModelDesc):
    """
    Tensorpack ModelDesc wrapper that turns a raw network callable into a
    trainable ImageNet classification model: input preprocessing, loss with
    optional label smoothing, weight decay, and an SGD-momentum optimizer.
    """
    def __init__(self,
                 model_lambda,
                 image_size=224,
                 data_format="channels_last",
                 **kwargs):
        super(ImageNetModel, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Callable building the network graph: model_lambda(x, training) -> logits.
        self.model_lambda = model_lambda
        self.image_size = image_size
        self.image_dtype = tf.float32
        self.data_format = data_format
        self.label_smoothing = 0.0
        self.loss_scale = 1.0
        self.weight_decay = 1e-4
        """
        Whether the image is BGR or RGB. If using DataFlow, then it should be BGR.
        """
        self.image_bgr = False
        """
        To apply on normalization parameters, use '.*/W|.*/gamma|.*/beta'
        """
        self.weight_decay_pattern = ".*/kernel"
    def inputs(self):
        # Inputs are declared channels-last (NHWC); the transpose to NCHW, if
        # needed, happens inside build_graph.
        return [tf.placeholder(self.image_dtype, (None, self.image_size, self.image_size, 3), "input"),
                tf.placeholder(tf.int32, (None,), "label")]
    def build_graph(self,
                    image,
                    label):
        # Normalize the input and, for channels-first models, switch NHWC -> NCHW.
        image = self.image_preprocess(image)
        if is_channels_first(self.data_format):
            image = tf.transpose(image, [0, 3, 1, 2], name="image_transpose")
        # tf.summary.image('input_image_', image)
        # tf.summary.tensor_summary('input_tensor_', image)
        # with tf.name_scope('tmp1_summaries'):
        #     add_tensor_summary(image, ['histogram', 'rms', 'sparsity'], name='tmp1_tensor')
        is_training = get_current_tower_context().is_training
        logits = self.model_lambda(
            x=image,
            training=is_training)
        loss = ImageNetModel.compute_loss_and_error(
            logits=logits,
            label=label,
            label_smoothing=self.label_smoothing)
        if self.weight_decay > 0:
            # L2 regularization over variables matching weight_decay_pattern.
            wd_loss = regularize_cost(
                regex=self.weight_decay_pattern,
                func=tf.contrib.layers.l2_regularizer(self.weight_decay),
                name="l2_regularize_loss")
            add_moving_summary(loss, wd_loss)
            total_cost = tf.add_n([loss, wd_loss], name="cost")
        else:
            total_cost = tf.identity(loss, name="cost")
        add_moving_summary(total_cost)
        if self.loss_scale != 1.0:
            logger.info("Scaling the total loss by {} ...".format(self.loss_scale))
            return total_cost * self.loss_scale
        else:
            return total_cost
    def optimizer(self):
        # SGD with Nesterov momentum; the learning rate is a non-trainable
        # variable so schedule callbacks can assign to it during training.
        lr = tf.get_variable("learning_rate", initializer=0.1, trainable=False)
        tf.summary.scalar("learning_rate-summary", lr)
        return tf.train.MomentumOptimizer(
            learning_rate=lr,
            momentum=0.9,
            use_nesterov=True)
    def image_preprocess(self,
                         image):
        # Standardize with ImageNet per-channel mean/std (constants are in RGB
        # order and are reversed when the input is BGR).
        with tf.name_scope("image_preprocess"):
            if image.dtype.base_dtype != tf.float32:
                image = tf.cast(image, tf.float32)
            mean = np.array([0.485, 0.456, 0.406], np.float32) * 255.0  # rgb
            std = np.array([0.229, 0.224, 0.225], np.float32) * 255.0
            if self.image_bgr:
                mean = mean[::-1]
                std = std[::-1]
            image_mean = tf.constant(mean, dtype=tf.float32)
            image_std = tf.constant(std, dtype=tf.float32)
            image = (image - image_mean) / image_std
            return image
    @staticmethod
    def compute_loss_and_error(logits,
                               label,
                               label_smoothing=0.0):
        """
        Build the cross-entropy loss and attach top-1/top-5 training-error
        moving summaries.
        """
        if label_smoothing == 0.0:
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits,
                labels=label)
        else:
            num_classes = logits.shape[-1]
            loss = tf.losses.softmax_cross_entropy(
                onehot_labels=tf.one_hot(label, num_classes),
                logits=logits,
                label_smoothing=label_smoothing)
        loss = tf.reduce_mean(loss, name="xentropy-loss")
        def prediction_incorrect(logits, label, topk=1, name="incorrect_vector"):
            # 1.0 for each sample whose label is NOT among the top-k predictions.
            with tf.name_scope("prediction_incorrect"):
                x = tf.logical_not(tf.nn.in_top_k(predictions=logits, targets=label, k=topk))
            return tf.cast(x, tf.float32, name=name)
        error_top1 = prediction_incorrect(logits, label, topk=1, name="wrong-top1")
        add_moving_summary(tf.reduce_mean(error_top1, name="train-error-top1"))
        error_top5 = prediction_incorrect(logits, label, topk=5, name="wrong-top5")
        add_moving_summary(tf.reduce_mean(error_top5, name="train-error-top5"))
        return loss
class GoogleNetResize(imgaug.ImageAugmentor):
    """
    Randomly crop 8%~100% of the original image area with a random aspect
    ratio, then resize the crop to `target_shape` ("Inception-style" crop).
    See `Going Deeper with Convolutions` by Google.
    """
    def __init__(self,
                 crop_area_fraction=0.08,
                 aspect_ratio_low=0.75,
                 aspect_ratio_high=1.333,
                 target_shape=224):
        # ImageAugmentor._init stores the ctor arguments as attributes.
        self._init(locals())
    def _augment(self, img, _):
        h, w = img.shape[:2]
        area = h * w
        # Try up to 10 random (area, aspect-ratio) proposals; fall back to a
        # deterministic shortest-edge resize + center crop if none fits.
        for _ in range(10):
            targetArea = self.rng.uniform(self.crop_area_fraction, 1.0) * area
            aspectR = self.rng.uniform(self.aspect_ratio_low, self.aspect_ratio_high)
            ww = int(np.sqrt(targetArea * aspectR) + 0.5)
            hh = int(np.sqrt(targetArea / aspectR) + 0.5)
            # Randomly swap the crop sides to cover aspect ratios below 1/high.
            if self.rng.uniform() < 0.5:
                ww, hh = hh, ww
            if hh <= h and ww <= w:
                x1 = 0 if w == ww else self.rng.randint(0, w - ww)
                y1 = 0 if h == hh else self.rng.randint(0, h - hh)
                out = img[y1:y1 + hh, x1:x1 + ww]
                out = cv2.resize(out, (self.target_shape, self.target_shape), interpolation=cv2.INTER_CUBIC)
                return out
        out = imgaug.ResizeShortestEdge(self.target_shape, interp=cv2.INTER_CUBIC).augment(img)
        out = imgaug.CenterCrop(self.target_shape).augment(out)
        return out
def get_imagenet_dataflow(datadir,
                          is_train,
                          batch_size,
                          augmentors,
                          parallel=None):
    """
    Build a tensorpack dataflow over the ILSVRC12 dataset.

    See explanations in the tutorial:
    http://tensorpack.readthedocs.io/en/latest/tutorial/efficient-dataflow.html

    Parameters:
    ----------
    datadir : str
        Path to the ILSVRC12 directory.
    is_train : bool
        Shuffled training split with process-parallel augmentation vs.
        sequential validation split with thread-parallel mapping.
    batch_size : int
        Batch size.
    augmentors : list
        List of imgaug augmentors to apply.
    parallel : int or None, default None
        Number of workers; defaults to min(40, cpu_count // 2).

    Returns:
    -------
    DataFlow
        Batched dataflow.
    """
    assert datadir is not None
    assert isinstance(augmentors, list)
    if parallel is None:
        parallel = min(40, multiprocessing.cpu_count() // 2)  # assuming hyperthreading
    if is_train:
        ds = dataset.ILSVRC12(datadir, "train", shuffle=True)
        ds = AugmentImageComponent(ds, augmentors, copy=False)
        if parallel < 16:
            logging.warning("DataFlow may become the bottleneck when too few processes are used.")
        ds = PrefetchDataZMQ(ds, parallel)
        # Drop the last incomplete batch during training.
        ds = BatchData(ds, batch_size, remainder=False)
    else:
        ds = dataset.ILSVRC12Files(datadir, "val", shuffle=False)
        aug = imgaug.AugmentorList(augmentors)
        def mapf(dp):
            # Decode one (filename, class) pair into an augmented image.
            fname, cls = dp
            im = cv2.imread(fname, cv2.IMREAD_COLOR)
            # Reverse the channel axis: cv2 loads BGR, the pipeline wants RGB.
            im = np.flip(im, axis=2)
            # print("fname={}".format(fname))
            im = aug.augment(im)
            return im, cls
        ds = MultiThreadMapData(ds, parallel, mapf, buffer_size=2000, strict=True)
        # ds = MapData(ds, mapf)
        # Keep the last incomplete batch so every validation sample is seen.
        ds = BatchData(ds, batch_size, remainder=True)
        ds = PrefetchDataZMQ(ds, 1)
        # ds = PrefetchData(ds, 1)
    return ds
def prepare_tf_context(num_gpus,
                       batch_size):
    """
    Scale the per-device batch size by the number of GPUs in use.

    Parameters:
    ----------
    num_gpus : int
        Number of GPUs; values below 2 are treated as a single device.
    batch_size : int
        Per-device batch size.

    Returns:
    -------
    int
        Effective global batch size.
    """
    device_count = num_gpus if num_gpus > 1 else 1
    return batch_size * device_count
def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path,
                  data_format="channels_last"):
    """
    Create a tensorpack-wrapped model and, if requested, a loader for its
    pretrained weights.

    Parameters:
    ----------
    model_name : str
        Name of the model (looked up via `get_model`).
    use_pretrained : bool
        Whether to use pretrained weights; when no explicit file is given, the
        model's own bundled weight file path is used.
    pretrained_model_file_path : str
        Explicit path to a pretrained weight file (may be empty).
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    ImageNetModel
        Wrapped model description.
    SessionInit or None
        Tensorpack weight loader, or None when no weights are to be loaded.
    """
    kwargs = {"pretrained": use_pretrained}
    raw_net = get_model(
        name=model_name,
        data_format=data_format,
        **kwargs)
    # Fall back to the standard 224x224 input when the model does not declare one.
    input_image_size = raw_net.in_size[0] if hasattr(raw_net, "in_size") else 224
    net = ImageNetModel(
        model_lambda=raw_net,
        image_size=input_image_size,
        data_format=data_format)
    if use_pretrained and not pretrained_model_file_path:
        # No explicit file given: use the weight file bundled with the model.
        pretrained_model_file_path = raw_net.file_path
    inputs_desc = None
    if pretrained_model_file_path:
        assert (os.path.isfile(pretrained_model_file_path))
        logging.info("Loading model: {}".format(pretrained_model_file_path))
        inputs_desc = get_model_loader(pretrained_model_file_path)
    return net, inputs_desc
def get_data(is_train,
             batch_size,
             data_dir_path,
             input_image_size=224,
             resize_inv_factor=0.875):
    """
    Build the ImageNet dataflow with either the training augmentation pipeline
    (Inception crop + color jitter + lighting + flip) or the validation
    pipeline (shortest-edge resize + center crop).

    Parameters:
    ----------
    is_train : bool
        Whether to build the training (True) or validation (False) pipeline.
    batch_size : int
        Batch size.
    data_dir_path : str
        Path to the ILSVRC12 dataset directory.
    input_image_size : int, default 224
        Final spatial size fed to the network.
    resize_inv_factor : float, default 0.875
        Inverted crop ratio: the shortest edge is resized to
        ceil(input_image_size / resize_inv_factor) before center cropping.

    Returns:
    -------
    DataFlow
        Batched tensorpack dataflow.
    """
    assert (resize_inv_factor > 0.0)
    resize_value = int(math.ceil(float(input_image_size) / resize_inv_factor))
    if is_train:
        augmentors = [
            GoogleNetResize(
                crop_area_fraction=0.08,
                target_shape=input_image_size),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.6, 1.4), clip=False),
                imgaug.Saturation(0.4, rgb=False),
                # rgb-bgr conversion for the constants copied from fb.resnet.torch
                imgaug.Lighting(
                    0.1,
                    eigval=np.asarray([0.2175, 0.0188, 0.0045][::-1]) * 255.0,
                    eigvec=np.array([
                        [-0.5675, 0.7192, 0.4009],
                        [-0.5808, -0.0045, -0.8140],
                        [-0.5836, -0.6948, 0.4203]], dtype="float32")[::-1, ::-1])]),
            imgaug.Flip(horiz=True)]
    else:
        augmentors = [
            # imgaug.ResizeShortestEdge(resize_value, cv2.INTER_CUBIC),
            imgaug.ResizeShortestEdge(resize_value, cv2.INTER_LINEAR),
            imgaug.CenterCrop((input_image_size, input_image_size))
        ]
    return get_imagenet_dataflow(
        datadir=data_dir_path,
        is_train=is_train,
        batch_size=batch_size,
        augmentors=augmentors)
def calc_flops(model):
    """
    Build the model graph with batch size 1 and print a trainable-variable
    summary plus a FLOP count via the TF profiler.

    Parameters:
    ----------
    model : ImageNetModel
        Model description whose `build_graph` is invoked on placeholder inputs.
    """
    # manually build the graph with batch=1
    input_desc = [
        InputDesc(tf.float32, [1, model.image_size, model.image_size, 3], "input"),
        InputDesc(tf.int32, [1], "label")
    ]
    input = PlaceholderInput()  # NOTE(review): shadows the builtin `input`; local only.
    input.setup(input_desc)
    with TowerContext("", is_training=False):
        model.build_graph(*input.get_input_tensors())
    model_utils.describe_trainable_vars()
    tf.profiler.profile(
        tf.get_default_graph(),
        cmd="op",
        options=tf.profiler.ProfileOptionBuilder.float_operation())
    logger.info("Note that TensorFlow counts flops in a different way from the paper.")
    logger.info("TensorFlow counts multiply+add as two flops, however the paper counts them "
                "as 1 flop because it can be executed in one instruction.")
| 12,668 | 36.482249 | 108 | py |
imgclsmob | imgclsmob-master/tensorflow_/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/model_provider.py | from .models.alexnet import *
from .models.zfnet import *
from .models.vgg import *
from .models.resnet import *
from .models.preresnet import *
from .models.resnext import *
from .models.seresnet import *
from .models.sepreresnet import *
from .models.seresnext import *
from .models.senet import *
from .models.densenet import *
from .models.darknet import *
from .models.darknet53 import *
from .models.channelnet import *
from .models.squeezenet import *
from .models.squeezenext import *
from .models.shufflenet import *
from .models.shufflenetv2 import *
from .models.shufflenetv2b import *
from .models.menet import *
from .models.mobilenet import *
from .models.mobilenetv2 import *
from .models.mobilenetv3 import *
from .models.igcv3 import *
from .models.mnasnet import *
__all__ = ['get_model', 'init_variables_from_state_dict']
_models = {
'alexnet': alexnet,
'alexnetb': alexnetb,
'zfnet': zfnet,
'zfnetb': zfnetb,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'bn_vgg11': bn_vgg11,
'bn_vgg13': bn_vgg13,
'bn_vgg16': bn_vgg16,
'bn_vgg19': bn_vgg19,
'bn_vgg11b': bn_vgg11b,
'bn_vgg13b': bn_vgg13b,
'bn_vgg16b': bn_vgg16b,
'bn_vgg19b': bn_vgg19b,
'resnet10': resnet10,
'resnet12': resnet12,
'resnet14': resnet14,
'resnetbc14b': resnetbc14b,
'resnet16': resnet16,
'resnet18_wd4': resnet18_wd4,
'resnet18_wd2': resnet18_wd2,
'resnet18_w3d4': resnet18_w3d4,
'resnet18': resnet18,
'resnet26': resnet26,
'resnetbc26b': resnetbc26b,
'resnet34': resnet34,
'resnetbc38b': resnetbc38b,
'resnet50': resnet50,
'resnet50b': resnet50b,
'resnet101': resnet101,
'resnet101b': resnet101b,
'resnet152': resnet152,
'resnet152b': resnet152b,
'resnet200': resnet200,
'resnet200b': resnet200b,
'preresnet10': preresnet10,
'preresnet12': preresnet12,
'preresnet14': preresnet14,
'preresnetbc14b': preresnetbc14b,
'preresnet16': preresnet16,
'preresnet18_wd4': preresnet18_wd4,
'preresnet18_wd2': preresnet18_wd2,
'preresnet18_w3d4': preresnet18_w3d4,
'preresnet18': preresnet18,
'preresnet26': preresnet26,
'preresnetbc26b': preresnetbc26b,
'preresnet34': preresnet34,
'preresnetbc38b': preresnetbc38b,
'preresnet50': preresnet50,
'preresnet50b': preresnet50b,
'preresnet101': preresnet101,
'preresnet101b': preresnet101b,
'preresnet152': preresnet152,
'preresnet152b': preresnet152b,
'preresnet200': preresnet200,
'preresnet200b': preresnet200b,
'preresnet269b': preresnet269b,
'resnext14_16x4d': resnext14_16x4d,
'resnext14_32x2d': resnext14_32x2d,
'resnext14_32x4d': resnext14_32x4d,
'resnext26_16x4d': resnext26_16x4d,
'resnext26_32x2d': resnext26_32x2d,
'resnext26_32x4d': resnext26_32x4d,
'resnext38_32x4d': resnext38_32x4d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'seresnet10': seresnet10,
'seresnet12': seresnet12,
'seresnet14': seresnet14,
'seresnet16': seresnet16,
'seresnet18': seresnet18,
'seresnet26': seresnet26,
'seresnetbc26b': seresnetbc26b,
'seresnet34': seresnet34,
'seresnetbc38b': seresnetbc38b,
'seresnet50': seresnet50,
'seresnet50b': seresnet50b,
'seresnet101': seresnet101,
'seresnet101b': seresnet101b,
'seresnet152': seresnet152,
'seresnet152b': seresnet152b,
'seresnet200': seresnet200,
'seresnet200b': seresnet200b,
'sepreresnet10': sepreresnet10,
'sepreresnet12': sepreresnet12,
'sepreresnet14': sepreresnet14,
'sepreresnet16': sepreresnet16,
'sepreresnet18': sepreresnet18,
'sepreresnet26': sepreresnet26,
'sepreresnetbc26b': sepreresnetbc26b,
'sepreresnet34': sepreresnet34,
'sepreresnetbc38b': sepreresnetbc38b,
'sepreresnet50': sepreresnet50,
'sepreresnet50b': sepreresnet50b,
'sepreresnet101': sepreresnet101,
'sepreresnet101b': sepreresnet101b,
'sepreresnet152': sepreresnet152,
'sepreresnet152b': sepreresnet152b,
'sepreresnet200': sepreresnet200,
'sepreresnet200b': sepreresnet200b,
'seresnext50_32x4d': seresnext50_32x4d,
'seresnext101_32x4d': seresnext101_32x4d,
'seresnext101_64x4d': seresnext101_64x4d,
'senet16': senet16,
'senet28': senet28,
'senet40': senet40,
'senet52': senet52,
'senet103': senet103,
'senet154': senet154,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'darknet_ref': darknet_ref,
'darknet_tiny': darknet_tiny,
'darknet19': darknet19,
'darknet53': darknet53,
'channelnet': channelnet,
'squeezenet_v1_0': squeezenet_v1_0,
'squeezenet_v1_1': squeezenet_v1_1,
'squeezeresnet_v1_0': squeezeresnet_v1_0,
'squeezeresnet_v1_1': squeezeresnet_v1_1,
'sqnxt23_w1': sqnxt23_w1,
'sqnxt23_w3d2': sqnxt23_w3d2,
'sqnxt23_w2': sqnxt23_w2,
'sqnxt23v5_w1': sqnxt23v5_w1,
'sqnxt23v5_w3d2': sqnxt23v5_w3d2,
'sqnxt23v5_w2': sqnxt23v5_w2,
'shufflenet_g1_w1': shufflenet_g1_w1,
'shufflenet_g2_w1': shufflenet_g2_w1,
'shufflenet_g3_w1': shufflenet_g3_w1,
'shufflenet_g4_w1': shufflenet_g4_w1,
'shufflenet_g8_w1': shufflenet_g8_w1,
'shufflenet_g1_w3d4': shufflenet_g1_w3d4,
'shufflenet_g3_w3d4': shufflenet_g3_w3d4,
'shufflenet_g1_wd2': shufflenet_g1_wd2,
'shufflenet_g3_wd2': shufflenet_g3_wd2,
'shufflenet_g1_wd4': shufflenet_g1_wd4,
'shufflenet_g3_wd4': shufflenet_g3_wd4,
'shufflenetv2_wd2': shufflenetv2_wd2,
'shufflenetv2_w1': shufflenetv2_w1,
'shufflenetv2_w3d2': shufflenetv2_w3d2,
'shufflenetv2_w2': shufflenetv2_w2,
'shufflenetv2b_wd2': shufflenetv2b_wd2,
'shufflenetv2b_w1': shufflenetv2b_w1,
'shufflenetv2b_w3d2': shufflenetv2b_w3d2,
'shufflenetv2b_w2': shufflenetv2b_w2,
'menet108_8x1_g3': menet108_8x1_g3,
'menet128_8x1_g4': menet128_8x1_g4,
'menet160_8x1_g8': menet160_8x1_g8,
'menet228_12x1_g3': menet228_12x1_g3,
'menet256_12x1_g4': menet256_12x1_g4,
'menet348_12x1_g3': menet348_12x1_g3,
'menet352_12x1_g8': menet352_12x1_g8,
'menet456_24x1_g3': menet456_24x1_g3,
'mobilenet_w1': mobilenet_w1,
'mobilenet_w3d4': mobilenet_w3d4,
'mobilenet_wd2': mobilenet_wd2,
'mobilenet_wd4': mobilenet_wd4,
'fdmobilenet_w1': fdmobilenet_w1,
'fdmobilenet_w3d4': fdmobilenet_w3d4,
'fdmobilenet_wd2': fdmobilenet_wd2,
'fdmobilenet_wd4': fdmobilenet_wd4,
'mobilenetv2_w1': mobilenetv2_w1,
'mobilenetv2_w3d4': mobilenetv2_w3d4,
'mobilenetv2_wd2': mobilenetv2_wd2,
'mobilenetv2_wd4': mobilenetv2_wd4,
'mobilenetv3_small_w7d20': mobilenetv3_small_w7d20,
'mobilenetv3_small_wd2': mobilenetv3_small_wd2,
'mobilenetv3_small_w3d4': mobilenetv3_small_w3d4,
'mobilenetv3_small_w1': mobilenetv3_small_w1,
'mobilenetv3_small_w5d4': mobilenetv3_small_w5d4,
'mobilenetv3_large_w7d20': mobilenetv3_large_w7d20,
'mobilenetv3_large_wd2': mobilenetv3_large_wd2,
'mobilenetv3_large_w3d4': mobilenetv3_large_w3d4,
'mobilenetv3_large_w1': mobilenetv3_large_w1,
'mobilenetv3_large_w5d4': mobilenetv3_large_w5d4,
'igcv3_w1': igcv3_w1,
'igcv3_w3d4': igcv3_w3d4,
'igcv3_wd2': igcv3_wd2,
'igcv3_wd4': igcv3_wd4,
'mnasnet_b1': mnasnet_b1,
'mnasnet_a1': mnasnet_a1,
'mnasnet_small': mnasnet_small,
}
def get_model(name, **kwargs):
    """
    Get supported model.

    Parameters:
    ----------
    name : str
        Name of model (case-insensitive).

    Returns:
    -------
    functor
        Resulted model.

    Raises:
    ------
    ValueError
        If the model name is not supported.
    """
    key = name.lower()
    if key not in _models:
        raise ValueError("Unsupported model: {}".format(key))
    return _models[key](**kwargs)
def init_variables_from_state_dict(sess,
                                   state_dict,
                                   ignore_extra=True):
    """
    Initialize model variables from a state dictionary.

    Parameters:
    ----------
    sess: Session
        A Session to use to load the weights.
    state_dict : dict
        Dictionary with values of model variables.
    ignore_extra : bool, default True
        Whether to silently ignore parameters from the file that are not present in this Module.
    """
    # Thin forwarding wrapper around the model_store implementation.
    from .models.model_store import init_variables_from_state_dict as impl
    impl(sess=sess,
         state_dict=state_dict,
         ignore_extra=ignore_extra)
| 8,612 | 29.010453 | 96 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/shufflenetv2.py | """
ShuffleNet V2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
import tensorflow as tf
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, batchnorm, channel_shuffle, maxpool2d,\
se_block, is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 training,
                 data_format,
                 name="shuffle_unit"):
    """
    ShuffleNetV2 unit.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Each of the two branches produces half of the output channels.
    mid_channels = out_channels // 2
    if downsample:
        # Downsampling unit: the shortcut branch (y1) applies a stride-2
        # depthwise conv + 1x1 conv to the whole input, and the whole input
        # also feeds the main branch (x2 = x).
        y1 = depthwise_conv3x3(
            x=x,
            channels=in_channels,
            strides=2,
            data_format=data_format,
            name=name + "/dw_conv4")
        y1 = batchnorm(
            x=y1,
            training=training,
            data_format=data_format,
            name=name + "/dw_bn4")
        y1 = conv1x1(
            x=y1,
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name=name + "/expand_conv5/conv")
        y1 = batchnorm(
            x=y1,
            training=training,
            data_format=data_format,
            name=name + "/expand_bn5")
        y1 = tf.nn.relu(y1, name=name + "/expand_activ5")
        x2 = x
    else:
        # Regular unit: split channels in half; y1 passes through unchanged,
        # x2 goes through the main branch.
        y1, x2 = tf.split(x, num_or_size_splits=2, axis=get_channel_axis(data_format))
    # Main branch: 1x1 compress -> 3x3 depthwise (stride 2 when downsampling)
    # -> 1x1 expand.
    y2 = conv1x1(
        x=x2,
        in_channels=(in_channels if downsample else mid_channels),
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/compress_conv1/conv")
    y2 = batchnorm(
        x=y2,
        training=training,
        data_format=data_format,
        name=name + "/compress_bn1")
    y2 = tf.nn.relu(y2, name=name + "/compress_activ1")
    y2 = depthwise_conv3x3(
        x=y2,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/dw_conv2")
    y2 = batchnorm(
        x=y2,
        training=training,
        data_format=data_format,
        name=name + "/dw_bn2")
    y2 = conv1x1(
        x=y2,
        in_channels=mid_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/expand_conv3/conv")
    y2 = batchnorm(
        x=y2,
        training=training,
        data_format=data_format,
        name=name + "/expand_bn3")
    y2 = tf.nn.relu(y2, name=name + "/expand_activ3")
    if use_se:
        y2 = se_block(
            x=y2,
            channels=mid_channels,
            data_format=data_format,
            name=name + "/se")
    if use_residual and not downsample:
        # Identity shortcut inside the main branch (non-downsampling units only).
        y2 = y2 + x2
    x = tf.concat([y1, y2], axis=get_channel_axis(data_format), name=name + "/concat")
    assert (mid_channels % 2 == 0)
    # Shuffle channels across the two halves so information mixes between branches.
    x = channel_shuffle(
        x=x,
        groups=2,
        data_format=data_format)
    return x
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       training,
                       data_format,
                       name="shuffle_init_block"):
    """
    ShuffleNetV2 specific initial block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Stride-2 3x3 conv block followed by a ceil-mode 3x3/2 max-pool: the
    # input is downsampled 4x in each spatial dimension before the first stage.
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=0,
        ceil_mode=True,
        data_format=data_format,
        name=name + "/pool")
    return x
class ShuffleNetV2(object):
    """
    ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNetV2, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.use_se = use_se
        self.use_residual = use_residual
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build the model graph on top of the given input tensor.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        y = shuffle_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_id, stage_channels in enumerate(self.channels):
            for unit_id, out_channels in enumerate(stage_channels):
                # Every stage downsamples at its first unit.
                y = shuffle_unit(
                    x=y,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=(unit_id == 0),
                    use_se=self.use_se,
                    use_residual=self.use_residual,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_id + 1, unit_id + 1))
                in_channels = out_channels
        y = conv1x1_block(
            x=y,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        y = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(y)
        y = flatten(
            x=y,
            data_format=self.data_format)
        return tf.keras.layers.Dense(
            units=self.classes,
            name="output")(y)
def get_shufflenetv2(width_scale,
                     model_name=None,
                     pretrained=False,
                     root=os.path.join("~", ".tensorflow", "models"),
                     **kwargs):
    """
    Create a ShuffleNetV2 model with the given width multiplier.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 24
    final_block_channels = 1024
    layers = [4, 8, 4]
    channels_per_layers = [116, 232, 464]

    channels = [[c] * n for (c, n) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        channels = [[int(c * width_scale) for c in stage] for stage in channels]
        # Only widen the final feature block for the largest configurations.
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def shufflenetv2_wd2(**kwargs):
    """
    Build the ShuffleNetV2 0.5x model ('ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_shufflenetv2`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    scale = 12.0 / 29.0
    return get_shufflenetv2(width_scale=scale, model_name="shufflenetv2_wd2", **kwargs)


def shufflenetv2_w1(**kwargs):
    """
    Build the ShuffleNetV2 1x model ('ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_shufflenetv2`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)


def shufflenetv2_w3d2(**kwargs):
    """
    Build the ShuffleNetV2 1.5x model ('ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_shufflenetv2`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    scale = 44.0 / 29.0
    return get_shufflenetv2(width_scale=scale, model_name="shufflenetv2_w3d2", **kwargs)


def shufflenetv2_w2(**kwargs):
    """
    Build the ShuffleNetV2 2x model ('ShuffleNet V2: Practical Guidelines for Efficient CNN
    Architecture Design,' https://arxiv.org/abs/1807.11164).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_shufflenetv2`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    scale = 61.0 / 29.0
    return get_shufflenetv2(width_scale=scale, model_name="shufflenetv2_w2", **kwargs)
def _test():
    """Smoke-test every ShuffleNetV2 variant: check parameter counts and output shape."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter counts per variant.
    expected_weights = {
        shufflenetv2_wd2: 1366792,
        shufflenetv2_w1: 2278604,
        shufflenetv2_w3d2: 4406098,
        shufflenetv2_w2: 7601686,
    }

    for model in [shufflenetv2_wd2, shufflenetv2_w1, shufflenetv2_w3d2, shufflenetv2_w2]:
        net = model(pretrained=pretrained, data_format=data_format)
        in_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=in_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weights[model])

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + in_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
| 14,880 | 29.745868 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/igcv3.py | """
IGCV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle, is_channels_first, flatten
def inv_res_unit(x,
                 in_channels,
                 out_channels,
                 strides,
                 expansion,
                 training,
                 data_format,
                 name="inv_res_unit"):
    """
    IGCV3 inverted residual unit: grouped 1x1 conv -> channel shuffle -> depthwise 3x3 conv ->
    grouped 1x1 conv, with an identity shortcut when spatial size and channel count are unchanged.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    expansion : bool
        Whether do expansion of channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'inv_res_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    groups = 2
    use_shortcut = (in_channels == out_channels) and (strides == 1)
    shortcut = x if use_shortcut else None
    mid_channels = (in_channels * 6) if expansion else in_channels

    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=groups,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = channel_shuffle(
        x=y,
        groups=groups,
        data_format=data_format)
    y = dwconv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=strides,
        activation="relu6",
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    y = conv1x1_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        groups=groups,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    if use_shortcut:
        y = y + shortcut
    return y
class IGCV3(object):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(IGCV3, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build the model graph on top of the given input tensor.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        y = conv3x3_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            strides=2,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_id, stage_channels in enumerate(self.channels):
            for unit_id, out_channels in enumerate(stage_channels):
                first_unit = (unit_id == 0)
                # Downsample at the first unit of every stage except the first;
                # the very first unit of the network does no channel expansion.
                strides = 2 if first_unit and (stage_id != 0) else 1
                expansion = not ((stage_id == 0) and first_unit)
                y = inv_res_unit(
                    x=y,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    expansion=expansion,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_id + 1, unit_id + 1))
                in_channels = out_channels
        y = conv1x1_block(
            x=y,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        y = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(y)
        y = flatten(
            x=y,
            data_format=self.data_format)
        return tf.keras.layers.Dense(
            units=self.classes,
            name="output")(y)
def get_igcv3(width_scale,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create an IGCV3-D model with the given width multiplier.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]

    # Group the layer widths into stages: a new stage starts at every
    # downsampling layer; non-downsampling layers extend the current stage.
    channels = [[]]
    for width, count, new_stage in zip(channels_per_layers, layers, downsample):
        if new_stage != 0:
            channels.append([width] * count)
        else:
            channels[-1] = channels[-1] + [width] * count

    if width_scale != 1.0:
        def make_even(v):
            # IGCV3 group convolutions require even channel counts.
            return v if (v % 2 == 0) else v + 1
        channels = [[make_even(int(c * width_scale)) for c in stage] for stage in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        if width_scale > 1.0:
            final_block_channels = make_even(int(final_block_channels * width_scale))

    net = IGCV3(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def igcv3_w1(**kwargs):
    """
    Build the IGCV3-D 1.0x model ('IGCV3: Interleaved Low-Rank Group Convolutions for Efficient
    Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(width_scale=1.0, model_name="igcv3_w1", **kwargs)


def igcv3_w3d4(**kwargs):
    """
    Build the IGCV3-D 0.75x model ('IGCV3: Interleaved Low-Rank Group Convolutions for Efficient
    Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(width_scale=0.75, model_name="igcv3_w3d4", **kwargs)


def igcv3_wd2(**kwargs):
    """
    Build the IGCV3-D 0.5x model ('IGCV3: Interleaved Low-Rank Group Convolutions for Efficient
    Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(width_scale=0.5, model_name="igcv3_wd2", **kwargs)


def igcv3_wd4(**kwargs):
    """
    Build the IGCV3-D 0.25x model ('IGCV3: Interleaved Low-Rank Group Convolutions for Efficient
    Deep Neural Networks,' https://arxiv.org/abs/1806.00178).

    Keyword arguments (e.g. `pretrained`, `root`) are forwarded to `get_igcv3`.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_igcv3(width_scale=0.25, model_name="igcv3_wd4", **kwargs)
def _test():
    """Smoke-test every IGCV3 variant: check parameter counts and output shape."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter counts per variant.
    expected_weights = {
        igcv3_w1: 3491688,
        igcv3_w3d4: 2638084,
        igcv3_wd2: 1985528,
        igcv3_wd4: 1534020,
    }

    for model in [igcv3_w1, igcv3_w3d4, igcv3_wd2, igcv3_wd4]:
        net = model(pretrained=pretrained, data_format=data_format)
        in_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=in_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weights[model])

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + in_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()
| 12,086 | 30.313472 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/preresnet.py | """
PreResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
"""
__all__ = ['PreResNet', 'preresnet10', 'preresnet12', 'preresnet14', 'preresnetbc14b', 'preresnet16', 'preresnet18_wd4',
'preresnet18_wd2', 'preresnet18_w3d4', 'preresnet18', 'preresnet26', 'preresnetbc26b', 'preresnet34',
'preresnetbc38b', 'preresnet50', 'preresnet50b', 'preresnet101', 'preresnet101b', 'preresnet152',
'preresnet152b', 'preresnet200', 'preresnet200b', 'preresnet269b', 'preres_block', 'preres_bottleneck_block',
'preres_init_block', 'preres_activation']
import os
import tensorflow as tf
from .common import pre_conv1x1_block, pre_conv3x3_block, conv2d, conv1x1, batchnorm, maxpool2d, is_channels_first,\
flatten
def preres_block(x,
                 in_channels,
                 out_channels,
                 strides,
                 training,
                 data_format,
                 name="preres_block"):
    """
    Simple PreResNet block for residual path in PreResNet unit: two pre-activated 3x3
    convolutions. The pre-activation of the input is returned as well so the caller can
    reuse it for the projection shortcut.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_block'
        Block name.

    Returns:
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    x, x_pre_activ = pre_conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        return_preact=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # Bug fix: the first convolution already produced `out_channels` feature maps,
    # so the second convolution consumes `out_channels` inputs (the original code
    # passed `in_channels`, which is wrong whenever in_channels != out_channels).
    x = pre_conv3x3_block(
        x=x,
        in_channels=out_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return x, x_pre_activ
def preres_bottleneck_block(x,
                            in_channels,
                            out_channels,
                            strides,
                            conv1_stride,
                            training,
                            data_format,
                            name="preres_bottleneck_block"):
    """
    PreResNet bottleneck block for residual path in PreResNet unit: pre-activated
    1x1 (reduce) -> 3x3 -> 1x1 (expand) convolutions with a 4x channel reduction in
    the middle. The pre-activation of the input is returned as well so the caller
    can reuse it for the projection shortcut.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_bottleneck_block'
        Block name.

    Returns:
    -------
    tuple of two Tensors
        Resulted tensor and preactivated input tensor.
    """
    mid_channels = out_channels // 4

    x, x_pre_activ = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=(strides if conv1_stride else 1),
        return_preact=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # Bug fix: conv2 and conv3 consume the `mid_channels`-wide tensors produced by
    # the preceding layers; the original code wrongly declared `in_channels` for both.
    x = pre_conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(1 if conv1_stride else strides),
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    x = pre_conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return x, x_pre_activ
def preres_unit(x,
                in_channels,
                out_channels,
                strides,
                bottleneck,
                conv1_stride,
                training,
                data_format,
                name="preres_unit"):
    """
    PreResNet unit: a residual body (bottleneck or simple) plus a shortcut. When the
    shape changes, the shortcut projects the *pre-activated* input with a strided 1x1
    convolution; otherwise the raw input is used as identity.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if bottleneck:
        body, pre_activ = preres_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            training=training,
            data_format=data_format,
            name=name + "/body")
    else:
        body, pre_activ = preres_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            training=training,
            data_format=data_format,
            name=name + "/body")

    if (in_channels != out_channels) or (strides != 1):
        # Shape mismatch: project the pre-activated input onto the body's shape.
        shortcut = conv1x1(
            x=pre_activ,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            data_format=data_format,
            name=name + "/identity_conv/conv")
    else:
        shortcut = x
    return body + shortcut
def preres_init_block(x,
                      in_channels,
                      out_channels,
                      training,
                      data_format,
                      name="preres_init_block"):
    """
    PreResNet stem: 7x7/2 convolution, batch norm, ReLU and a 3x3/2 max-pooling
    (the classic ResNet input block).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    y = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=2,
        padding=3,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    y = tf.nn.relu(y, name=name + "/activ")
    y = maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return y
def preres_activation(x,
                      training,
                      data_format,
                      name="preres_activation"):
    """
    PreResNet final pre-activation block (batch norm + ReLU, no convolution); used
    by itself after the last stage because pre-activated units leave their output
    un-normalized.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'preres_activation'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    y = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    return tf.nn.relu(y, name=name + "/activ")
class PreResNet(object):
    """
    PreResNet model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(PreResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build the model graph on top of the given input tensor.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        y = preres_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_id, stage_channels in enumerate(self.channels):
            for unit_id, out_channels in enumerate(stage_channels):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_id == 0) and (stage_id != 0) else 1
                y = preres_unit(
                    x=y,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=unit_strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_id + 1, unit_id + 1))
                in_channels = out_channels
        y = preres_activation(
            x=y,
            training=training,
            data_format=self.data_format,
            name="features/post_activ")
        y = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(y)
        y = flatten(
            x=y,
            data_format=self.data_format)
        return tf.keras.layers.Dense(
            units=self.classes,
            name="output")(y)
def get_preresnet(blocks,
                  bottleneck=None,
                  conv1_stride=True,
                  width_scale=1.0,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create PreResNet or SE-PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. When None, it is
        inferred from the depth (bottleneck for 50 blocks and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)

    # Per-stage unit counts for every supported depth.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif (blocks == 14) and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    elif blocks == 269:
        layers = [3, 30, 48, 8]
    else:
        raise ValueError("Unsupported PreResNet with number of blocks: {}".format(blocks))

    # Sanity check: per-stage counts must add up to the requested depth
    # (each unit contributes 3 convolutions with bottleneck, 2 without).
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        channels_per_layers = [ci * 4 for ci in channels_per_layers]

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1.0:
        # Scale every unit width except the very last unit of the last stage.
        last_stage = len(channels) - 1
        scaled = []
        for i, stage in enumerate(channels):
            last_unit = len(stage) - 1
            scaled.append([
                cij if (i == last_stage) and (j == last_unit) else int(cij * width_scale)
                for j, cij in enumerate(stage)])
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)

    net = PreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def preresnet10(**kwargs):
"""
PreResNet-10 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=10, model_name="preresnet10", **kwargs)
def preresnet12(**kwargs):
"""
PreResNet-12 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=12, model_name="preresnet12", **kwargs)
def preresnet14(**kwargs):
"""
PreResNet-14 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=14, model_name="preresnet14", **kwargs)
def preresnetbc14b(**kwargs):
"""
PreResNet-BC-14b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model (bottleneck compressed).
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_preresnet(blocks=14, bottleneck=True, conv1_stride=False, model_name="preresnetbc14b", **kwargs)
def preresnet16(**kwargs):
"""
PreResNet-16 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
Returns:
-------
functor
Functor for model graph creation with extra fields.
"""
return get_preresnet(blocks=16, model_name="preresnet16", **kwargs)
def preresnet18_wd4(**kwargs):
    """
    Build the experimental PreResNet-18 network at 0.25 width scale from 'Identity Mappings
    in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet18_wd4", blocks=18, width_scale=0.25, **kwargs)
def preresnet18_wd2(**kwargs):
    """
    Build the experimental PreResNet-18 network at 0.5 width scale from 'Identity Mappings
    in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet18_wd2", blocks=18, width_scale=0.5, **kwargs)
def preresnet18_w3d4(**kwargs):
    """
    Build the experimental PreResNet-18 network at 0.75 width scale from 'Identity Mappings
    in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet18_w3d4", blocks=18, width_scale=0.75, **kwargs)
def preresnet18(**kwargs):
    """
    Build the PreResNet-18 network from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet18", blocks=18, **kwargs)
def preresnet26(**kwargs):
    """
    PreResNet-26 model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(blocks=26, bottleneck=False, model_name="preresnet26", **kwargs)
def preresnetbc26b(**kwargs):
    """
    PreResNet-BC-26b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="preresnetbc26b", **kwargs)
def preresnet34(**kwargs):
    """
    Build the PreResNet-34 network from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet34", blocks=34, **kwargs)
def preresnetbc38b(**kwargs):
    """
    PreResNet-BC-38b model from 'Identity Mappings in Deep Residual Networks,' https://arxiv.org/abs/1603.05027.
    It's an experimental model (bottleneck compressed).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_preresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="preresnetbc38b", **kwargs)
def preresnet50(**kwargs):
    """
    Build the PreResNet-50 network from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet50", blocks=50, **kwargs)
def preresnet50b(**kwargs):
    """
    Build the PreResNet-50 network variant that strides at the second convolution of each
    bottleneck block, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet50b", blocks=50, conv1_stride=False, **kwargs)
def preresnet101(**kwargs):
    """
    Build the PreResNet-101 network from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet101", blocks=101, **kwargs)
def preresnet101b(**kwargs):
    """
    Build the PreResNet-101 network variant that strides at the second convolution of each
    bottleneck block, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet101b", blocks=101, conv1_stride=False, **kwargs)
def preresnet152(**kwargs):
    """
    Build the PreResNet-152 network from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet152", blocks=152, **kwargs)
def preresnet152b(**kwargs):
    """
    Build the PreResNet-152 network variant that strides at the second convolution of each
    bottleneck block, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet152b", blocks=152, conv1_stride=False, **kwargs)
def preresnet200(**kwargs):
    """
    Build the PreResNet-200 network from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet200", blocks=200, **kwargs)
def preresnet200b(**kwargs):
    """
    Build the PreResNet-200 network variant that strides at the second convolution of each
    bottleneck block, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet200b", blocks=200, conv1_stride=False, **kwargs)
def preresnet269b(**kwargs):
    """
    Build the PreResNet-269 network variant that strides at the second convolution of each
    bottleneck block, from 'Identity Mappings in Deep Residual Networks,'
    https://arxiv.org/abs/1603.05027.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_preresnet(model_name="preresnet269b", blocks=269, conv1_stride=False, **kwargs)
def _test():
    """Smoke-test every PreResNet variant: build the graph, verify the trainable
    parameter count, and run one zero-input forward pass per model."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    # Expected trainable-parameter counts, keyed by model constructor.
    # Dict preserves insertion order, so models are tested in the original sequence.
    expected_weights = {
        preresnet10: 5417128,
        preresnet12: 5491112,
        preresnet14: 5786536,
        preresnetbc14b: 10057384,
        preresnet16: 6967208,
        preresnet18_wd4: 3935960,
        preresnet18_wd2: 5802440,
        preresnet18_w3d4: 8473784,
        preresnet18: 11687848,
        preresnet26: 17958568,
        preresnetbc26b: 15987624,
        preresnet34: 21796008,
        preresnetbc38b: 21917864,
        preresnet50: 25549480,
        preresnet50b: 25549480,
        preresnet101: 44541608,
        preresnet101b: 44541608,
        preresnet152: 60185256,
        preresnet152b: 60185256,
        preresnet200: 64666280,
        preresnet200b: 64666280,
        preresnet269b: 102065832,
    }
    for model_fn, param_count_ref in expected_weights.items():
        net = model_fn(pretrained=pretrained, data_format=data_format)
        in_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=in_shape,
            name="xx")
        y_net = net(x)
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model_fn.__name__, weight_count))
        assert (weight_count == param_count_ref)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            batch_shape = (1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3)
            y = sess.run(y_net, feed_dict={x: np.zeros(batch_shape, np.float32)})
            assert (y.shape == (1, 1000))
        # Clear the default graph so model builds don't accumulate variables.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 31,739 | 30.645065 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/shufflenetv2b.py | """
ShuffleNet V2 for ImageNet-1K, implemented in TensorFlow. The alternative variant.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2b', 'shufflenetv2b_wd2', 'shufflenetv2b_w1', 'shufflenetv2b_w3d2', 'shufflenetv2b_w2']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, channel_shuffle, channel_shuffle2, maxpool2d,\
se_block, is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 shuffle_group_first,
                 training,
                 data_format,
                 name="shuffle_unit"):
    """
    ShuffleNetV2(b) unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether do downsample.
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    shuffle_group_first : bool
        Whether to use channel shuffle in group first mode.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # mid_channels: bottleneck width of the transform (right) branch.
    mid_channels = out_channels // 2
    in_channels2 = in_channels // 2
    assert (in_channels % 2 == 0)
    if downsample:
        # Downsampling unit: the shortcut (left) branch processes the FULL input
        # with a strided depthwise conv + 1x1 conv; no channel split happens.
        y1 = dwconv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=in_channels,
            strides=2,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/shortcut_dconv")
        y1 = conv1x1_block(
            x=y1,
            in_channels=in_channels,
            out_channels=in_channels,
            training=training,
            data_format=data_format,
            name=name + "/shortcut_conv")
        x2 = x
    else:
        # Regular unit: split channels in half; y1 passes through untouched,
        # x2 goes through the transform branch.
        y1, x2 = tf.split(x, num_or_size_splits=2, axis=get_channel_axis(data_format))
    # The transform branch consumes the whole input when downsampling, half otherwise;
    # it must output whatever channel count the shortcut branch does not provide.
    y2_in_channels = (in_channels if downsample else in_channels2)
    y2_out_channels = out_channels - y2_in_channels
    # Transform branch: 1x1 -> depthwise 3x3 (strided when downsampling) -> 1x1.
    y2 = conv1x1_block(
        x=x2,
        in_channels=y2_in_channels,
        out_channels=mid_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y2 = dwconv3x3_block(
        x=y2,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(2 if downsample else 1),
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/dconv")
    y2 = conv1x1_block(
        x=y2,
        in_channels=mid_channels,
        out_channels=y2_out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    if use_se:
        # Optional squeeze-and-excitation recalibration on the transform branch.
        y2 = se_block(
            x=y2,
            channels=y2_out_channels,
            data_format=data_format,
            name=name + "/se")
    if use_residual and not downsample:
        # Residual add only makes sense when shapes match (no stride change).
        assert (y2_out_channels == in_channels2)
        y2 = y2 + x2
    x = tf.concat([y1, y2], axis=get_channel_axis(data_format), name=name + "/concat")
    assert (out_channels % 2 == 0)
    # Shuffle channels between the two branches so information mixes across units.
    # The two shuffle variants differ in grouping order (the "b" in ShuffleNetV2b).
    if shuffle_group_first:
        x = channel_shuffle(
            x=x,
            groups=2,
            data_format=data_format)
    else:
        x = channel_shuffle2(
            x=x,
            groups=2,
            data_format=data_format)
    return x
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       training,
                       data_format,
                       name="shuffle_init_block"):
    """
    Initial block of ShuffleNetV2(b): a strided 3x3 conv block followed by 3x3
    max-pooling, reducing the spatial resolution by 4x overall.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether the graph is built in training mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Output tensor.
    """
    y = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    return maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        ceil_mode=False,
        data_format=data_format,
        name=name + "/pool")
class ShuffleNetV2b(object):
    """
    ShuffleNetV2(b) model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 shuffle_group_first=True,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNetV2b, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration is only stored here; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.use_se = use_se
        self.use_residual = use_residual
        self.shuffle_group_first = shuffle_group_first
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = shuffle_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        # Stages of shuffle units; the first unit of each stage downsamples spatially.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                downsample = (j == 0)
                x = shuffle_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    downsample=downsample,
                    use_se=self.use_se,
                    use_residual=self.use_residual,
                    shuffle_group_first=self.shuffle_group_first,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Final 1x1 expansion, then global (7x7) average pooling and the classifier head.
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_shufflenetv2b(width_scale,
                      shuffle_group_first=True,
                      model_name=None,
                      pretrained=False,
                      root=os.path.join("~", ".tensorflow", "models"),
                      **kwargs):
    """
    Construct a ShuffleNetV2(b) model for the given width multiplier.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    shuffle_group_first : bool, default True
        Whether to use channel shuffle in group first mode.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    stage_depths = [4, 8, 4]
    stage_widths = [116, 232, 464]
    init_block_channels = 24
    final_block_channels = 1024
    if width_scale != 1.0:
        # Scale each stage width; the final 1x1 block is widened only for large scales.
        stage_widths = [int(w * width_scale) for w in stage_widths]
        if width_scale > 1.5:
            final_block_channels = int(final_block_channels * width_scale)
    channels = [[w] * d for (w, d) in zip(stage_widths, stage_depths)]
    net = ShuffleNetV2b(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        shuffle_group_first=shuffle_group_first,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def shufflenetv2b_wd2(**kwargs):
    """
    Build the ShuffleNetV2(b) 0.5x network from 'ShuffleNet V2: Practical Guidelines for
    Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_shufflenetv2b(
        model_name="shufflenetv2b_wd2",
        width_scale=(12.0 / 29.0),
        shuffle_group_first=True,
        **kwargs)
def shufflenetv2b_w1(**kwargs):
    """
    Build the ShuffleNetV2(b) 1x network from 'ShuffleNet V2: Practical Guidelines for
    Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_shufflenetv2b(
        model_name="shufflenetv2b_w1",
        width_scale=1.0,
        shuffle_group_first=True,
        **kwargs)
def shufflenetv2b_w3d2(**kwargs):
    """
    Build the ShuffleNetV2(b) 1.5x network from 'ShuffleNet V2: Practical Guidelines for
    Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_shufflenetv2b(
        model_name="shufflenetv2b_w3d2",
        width_scale=(44.0 / 29.0),
        shuffle_group_first=True,
        **kwargs)
def shufflenetv2b_w2(**kwargs):
    """
    Build the ShuffleNetV2(b) 2x network from 'ShuffleNet V2: Practical Guidelines for
    Efficient CNN Architecture Design,' https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    return get_shufflenetv2b(
        model_name="shufflenetv2b_w2",
        width_scale=(61.0 / 29.0),
        shuffle_group_first=True,
        **kwargs)
def _test():
    """Smoke-test each ShuffleNetV2(b) variant: verify the trainable parameter count
    and run one zero-input forward pass per model."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    # Expected trainable-parameter counts, keyed by model constructor (insertion order
    # preserves the original test sequence).
    expected_weights = {
        shufflenetv2b_wd2: 1366792,
        shufflenetv2b_w1: 2279760,
        shufflenetv2b_w3d2: 4410194,
        shufflenetv2b_w2: 7611290,
    }
    for model_fn, param_count_ref in expected_weights.items():
        net = model_fn(pretrained=pretrained, data_format=data_format)
        in_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=in_shape,
            name="xx")
        y_net = net(x)
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model_fn.__name__, weight_count))
        assert (weight_count == param_count_ref)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            batch_shape = (1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3)
            y = sess.run(y_net, feed_dict={x: np.zeros(batch_shape, np.float32)})
            assert (y.shape == (1, 1000))
        # Clear the default graph between models to avoid variable accumulation.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 15,582 | 29.980119 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/menet.py | """
MENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
https://arxiv.org/abs/1803.09127.
"""
__all__ = ['MENet', 'menet108_8x1_g3', 'menet128_8x1_g4', 'menet160_8x1_g8', 'menet228_12x1_g3', 'menet256_12x1_g4',
'menet348_12x1_g3', 'menet352_12x1_g8', 'menet456_24x1_g3']
import os
import tensorflow as tf
from .common import conv2d, conv1x1, conv3x3, depthwise_conv3x3, batchnorm, channel_shuffle, maxpool2d, avgpool2d,\
is_channels_first, get_channel_axis, flatten
def me_unit(x,
            in_channels,
            out_channels,
            side_channels,
            groups,
            downsample,
            ignore_group,
            training,
            data_format,
            name="me_unit"):
    """
    MENet unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    side_channels : int
        Number of side channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'me_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # ShuffleNet-style bottleneck: compress to a quarter of the output width.
    mid_channels = out_channels // 4
    if downsample:
        # When downsampling, the identity path is concatenated instead of added,
        # so the main branch supplies only the remaining channels.
        out_channels -= in_channels
    identity = x
    # pointwise group convolution 1
    x = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=(1 if ignore_group else groups),
        data_format=data_format,
        name=name + "/compress_conv1")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/compress_bn1")
    x = tf.nn.relu(x, name=name + "/compress_activ")
    assert (mid_channels % groups == 0)
    x = channel_shuffle(
        x=x,
        groups=groups,
        data_format=data_format)
    # merging
    # The side ("merge") branch squeezes the bottleneck into a narrow feature map.
    y = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=side_channels,
        data_format=data_format,
        name=name + "/s_merge_conv/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/s_merge_bn")
    y = tf.nn.relu(y, name=name + "/s_merge_activ")
    # depthwise convolution (bottleneck)
    x = depthwise_conv3x3(
        x=x,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/dw_conv2")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/dw_bn2")
    # evolution
    # The side branch is evolved with a 3x3 conv (matching the main branch's stride)
    # and expanded back to mid_channels with a sigmoid, producing a gating mask.
    y = conv3x3(
        x=y,
        in_channels=side_channels,
        out_channels=side_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/s_conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/s_conv_bn")
    y = tf.nn.relu(y, name=name + "/s_conv_activ")
    y = conv1x1(
        x=y,
        in_channels=side_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/s_evolve_conv/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/s_evolve_bn")
    y = tf.nn.sigmoid(y, name=name + "/s_evolve_activ")
    # Gate the main depthwise branch with the evolved side branch.
    x = x * y
    # pointwise group convolution 2
    x = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        groups=groups,
        data_format=data_format,
        name=name + "/expand_conv3")
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/expand_bn3")
    if downsample:
        # Downsampling unit: average-pool the identity and concatenate channels.
        identity = avgpool2d(
            x=identity,
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name=name + "/avgpool")
        x = tf.concat([x, identity], axis=get_channel_axis(data_format), name=name + "/concat")
    else:
        # Regular unit: plain residual addition.
        x = x + identity
    x = tf.nn.relu(x, name=name + "/final_activ")
    return x
def me_init_block(x,
                  in_channels,
                  out_channels,
                  training,
                  data_format,
                  name="me_init_block"):
    """
    Initial block of MENet: strided 3x3 conv + batchnorm + ReLU, then 3x3
    max-pooling, reducing the spatial resolution by 4x overall.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether the graph is built in training mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'me_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Output tensor.
    """
    y = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=2,
        padding=1,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    y = tf.nn.relu(y, name=name + "/activ")
    return maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
class MENet(object):
    """
    MENet model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile Applications,'
    https://arxiv.org/abs/1803.09127.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 side_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MENet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration is only stored here; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.side_channels = side_channels
        self.groups = groups
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = me_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        # Stages of ME-units; the first unit of each stage downsamples, and the very
        # first unit of the network skips grouping in its first pointwise conv.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                downsample = (j == 0)
                ignore_group = (i == 0) and (j == 0)
                x = me_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    side_channels=self.side_channels,
                    groups=self.groups,
                    downsample=downsample,
                    ignore_group=ignore_group,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Global (7x7) average pooling followed by the classifier head.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_menet(first_stage_channels,
              side_channels,
              groups,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Construct a MENet model for the given first-stage width.

    Parameters:
    ----------
    first_stage_channels : int
        Number of output channels at the first stage.
    side_channels : int
        Number of side channels in a ME-unit.
    groups : int
        Number of groups in convolution layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Load pretrained weights if True.
    root : str, default '~/.tensorflow/models'
        Directory for storing the model parameters.

    Returns:
    -------
    functor
        Functor that creates the model graph, with extra fields attached.
    """
    # Map first-stage width -> (init block channels, per-stage widths).
    # Each later stage doubles the channel count of the previous one.
    configs = {
        108: (12, [108, 216, 432]),
        128: (12, [128, 256, 512]),
        160: (16, [160, 320, 640]),
        228: (24, [228, 456, 912]),
        256: (24, [256, 512, 1024]),
        348: (24, [348, 696, 1392]),
        352: (24, [352, 704, 1408]),
        456: (48, [456, 912, 1824]),
    }
    if first_stage_channels not in configs:
        raise ValueError("The {} of `first_stage_channels` is not supported".format(first_stage_channels))
    init_block_channels, channels_per_layers = configs[first_stage_channels]
    layers = [4, 8, 4]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = MENet(
        channels=channels,
        init_block_channels=init_block_channels,
        side_channels=side_channels,
        groups=groups,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def menet108_8x1_g3(**kwargs):
    """
    108-MENet-8x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=108,
        side_channels=8,
        groups=3,
        model_name="menet108_8x1_g3",
        **kwargs)
def menet128_8x1_g4(**kwargs):
    """
    128-MENet-8x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=128,
        side_channels=8,
        groups=4,
        model_name="menet128_8x1_g4",
        **kwargs)
def menet160_8x1_g8(**kwargs):
    """
    160-MENet-8x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=160,
        side_channels=8,
        groups=8,
        model_name="menet160_8x1_g8",
        **kwargs)
def menet228_12x1_g3(**kwargs):
    """
    228-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=228,
        side_channels=12,
        groups=3,
        model_name="menet228_12x1_g3",
        **kwargs)
def menet256_12x1_g4(**kwargs):
    """
    256-MENet-12x1 (g=4) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=256,
        side_channels=12,
        groups=4,
        model_name="menet256_12x1_g4",
        **kwargs)
def menet348_12x1_g3(**kwargs):
    """
    348-MENet-12x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=348,
        side_channels=12,
        groups=3,
        model_name="menet348_12x1_g3",
        **kwargs)
def menet352_12x1_g8(**kwargs):
    """
    352-MENet-12x1 (g=8) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=352,
        side_channels=12,
        groups=8,
        model_name="menet352_12x1_g8",
        **kwargs)
def menet456_24x1_g3(**kwargs):
    """
    456-MENet-24x1 (g=3) model from 'Merging and Evolution: Improving Convolutional Neural Networks for Mobile
    Applications,' https://arxiv.org/abs/1803.09127.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_menet(
        first_stage_channels=456,
        side_channels=24,
        groups=3,
        model_name="menet456_24x1_g3",
        **kwargs)
def _test():
    """
    Self-test: build every MENet variant, check the trainable-parameter count
    against known reference values, and run a dummy forward pass (TF1 graph
    mode with a placeholder and an explicit Session).
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        menet108_8x1_g3,
        menet128_8x1_g4,
        menet160_8x1_g8,
        menet228_12x1_g3,
        menet256_12x1_g4,
        menet348_12x1_g3,
        menet352_12x1_g8,
        menet456_24x1_g3,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1-style symbolic input; NCHW or NHWC depending on data_format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameter count for the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != menet108_8x1_g3 or weight_count == 654516)
        assert (model != menet128_8x1_g4 or weight_count == 750796)
        assert (model != menet160_8x1_g8 or weight_count == 850120)
        assert (model != menet228_12x1_g3 or weight_count == 1806568)
        assert (model != menet256_12x1_g4 or weight_count == 1888240)
        assert (model != menet348_12x1_g3 or weight_count == 3368128)
        assert (model != menet352_12x1_g8 or weight_count == 2272872)
        assert (model != menet456_24x1_g3 or weight_count == 5304784)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Dummy zero image batch just to verify the output shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Each model gets a clean graph so variable counts do not accumulate.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 19,323 | 29.86901 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/channelnet.py | """
ChannelNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise Convolutions,'
https://arxiv.org/abs/1809.01330.
"""
__all__ = ['ChannelNet', 'channelnet']
import os
import tensorflow as tf
from .common import conv2d, batchnorm, is_channels_first, get_channel_axis, flatten
def dwconv3x3(x,
              in_channels,
              out_channels,
              strides,
              use_bias=False,
              data_format="channels_last",
              name="dwconv3x3"):
    """
    3x3 depthwise version of the standard convolution layer.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # A depthwise convolution is expressed as a grouped convolution with one
    # group per output channel.
    conv_args = dict(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=1,
        groups=out_channels,
        use_bias=use_bias,
        data_format=data_format,
        name=name)
    return conv2d(**conv_args)
def channet_conv(x,
                 in_channels,
                 out_channels,
                 kernel_size,
                 strides,
                 padding,
                 dilation=1,
                 groups=1,
                 use_bias=False,
                 dropout_rate=0.0,
                 activate=True,
                 training=False,
                 data_format="channels_last",
                 name="channet_conv"):
    """
    ChannelNet specific convolution block with Batch normalization and ReLU6 activation.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_conv'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name + "/conv")
    # Optional dropout sits between the convolution and its batch norm.
    if dropout_rate > 0.0:
        dropout_layer = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")
        out = dropout_layer(inputs=out, training=training)
    out = batchnorm(
        x=out,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    return tf.nn.relu6(out, name=name + "/activ") if activate else out
def channet_conv1x1(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    groups=1,
                    use_bias=False,
                    dropout_rate=0.0,
                    activate=True,
                    training=False,
                    data_format="channels_last",
                    name="channet_conv1x1"):
    """
    1x1 version of ChannelNet specific convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_conv1x1'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Pointwise convolution: fixed 1x1 kernel with no spatial padding.
    common_args = dict(
        groups=groups,
        use_bias=use_bias,
        dropout_rate=dropout_rate,
        activate=activate,
        training=training,
        data_format=data_format,
        name=name)
    return channet_conv(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=1,
        strides=strides,
        padding=0,
        **common_args)
def channet_conv3x3(x,
                    in_channels,
                    out_channels,
                    strides,
                    padding=1,
                    dilation=1,
                    groups=1,
                    use_bias=False,
                    dropout_rate=0.0,
                    activate=True,
                    training=False,
                    data_format="channels_last",
                    name="channet_conv3x3"):
    """
    3x3 version of ChannelNet specific convolution block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    dropout_rate : float, default 0.0
        Dropout rate.
    activate : bool, default True
        Whether activate the convolution block.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_conv3x3'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # 3x3 convolution: only the kernel size is fixed, everything else forwards.
    common_args = dict(
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        dropout_rate=dropout_rate,
        activate=activate,
        training=training,
        data_format=data_format,
        name=name)
    return channet_conv(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=3,
        strides=strides,
        padding=padding,
        **common_args)
def channet_dws_conv_block(x,
                           in_channels,
                           out_channels,
                           strides,
                           groups=1,
                           dropout_rate=0.0,
                           training=False,
                           data_format="channels_last",
                           name="channet_dws_conv_block"):
    """
    ChannelNet specific depthwise separable convolution block with BatchNorms and activations at last convolution
    layers.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    dropout_rate : float, default 0.0
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'channet_dws_conv_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Depthwise stage: spatial filtering, channel count unchanged.
    dw_out = dwconv3x3(
        x=x,
        in_channels=in_channels,
        out_channels=in_channels,
        strides=strides,
        data_format=data_format,
        name=name + '/dw_conv')
    # Pointwise stage: channel mixing (with BN/activation inside).
    return channet_conv1x1(
        x=dw_out,
        in_channels=in_channels,
        out_channels=out_channels,
        groups=groups,
        dropout_rate=dropout_rate,
        training=training,
        data_format=data_format,
        name=name + '/pw_conv')
def simple_group_block(x,
                       channels,
                       multi_blocks,
                       groups,
                       dropout_rate,
                       training,
                       data_format,
                       name="simple_group_block"):
    """
    ChannelNet specific block with a sequence of depthwise separable group convolution layers.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'simple_group_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Every DWS block in the chain preserves the channel count.
    for block_idx in range(1, multi_blocks + 1):
        x = channet_dws_conv_block(
            x=x,
            in_channels=channels,
            out_channels=channels,
            strides=1,
            groups=groups,
            dropout_rate=dropout_rate,
            training=training,
            data_format=data_format,
            name='{}/block{}'.format(name, block_idx))
    return x
def channelwise_conv2d(x,
                       groups,
                       dropout_rate,
                       training=False,
                       data_format="channels_last",
                       name="pure_conv2d"):
    """
    ChannelNet specific block with channel-wise convolution.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups (also used as the 3D-convolution filter count and
        the stride along the channel dimension).
    dropout_rate : float
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pure_conv2d'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Insert a singleton axis at the channel position so the channel dimension
    # can be swept by a 3D convolution (the channel-wise convolution trick).
    # NOTE(review): the surrounding model asserts channels_first, so the
    # inserted axis becomes the Conv3D channel axis — confirm for other layouts.
    x = tf.expand_dims(x, axis=get_channel_axis(data_format), name=name + '/expand_dims')
    filters = groups
    # Kernel covers 4*groups positions along the (former) channel axis and
    # strides by `groups`, so that axis is down-sampled by a factor of `groups`.
    kernel_size = [4 * groups, 1, 1]
    strides = [groups, 1, 1]
    x = tf.keras.layers.Conv3D(
        filters=filters,
        kernel_size=kernel_size,
        strides=strides,
        padding="same",
        data_format=data_format,
        use_bias=False,
        name=name + '/conv')(x)
    if dropout_rate > 0.0:
        x = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")(
            inputs=x,
            training=training)
    if filters == 1:
        # Single filter: the inserted singleton axis can simply be removed.
        x = tf.squeeze(x, axis=[get_channel_axis(data_format)], name=name + '/squeeze')
    # Fold the filter dimension back into the channel dimension by unstacking
    # along the channel axis and re-concatenating the slices there.
    x = tf.unstack(x, axis=get_channel_axis(data_format), name=name + '/unstack')
    x = tf.concat(x, axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def conv_group_block(x,
                     channels,
                     multi_blocks,
                     groups,
                     dropout_rate,
                     training,
                     data_format,
                     name="conv_group_block"):
    """
    ChannelNet specific block with a combination of channel-wise convolution, depthwise separable group convolutions.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'conv_group_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Requires channels_first layout: the channel axis must be dimension 1.
    assert (channels == x.shape[1].value)
    assert (channels % groups == 0)
    # Channel-wise convolution first mixes information across channel groups...
    x = channelwise_conv2d(
        x=x,
        groups=groups,
        dropout_rate=dropout_rate,
        training=training,
        data_format=data_format,
        name=name + '/conv')
    # ...then a chain of channel-preserving DWS group convolutions refines it.
    x = simple_group_block(
        x=x,
        channels=channels,
        multi_blocks=multi_blocks,
        groups=groups,
        dropout_rate=dropout_rate,
        training=training,
        data_format=data_format,
        name=name)
    return x
def channet_unit(x,
                 in_channels,
                 out_channels_list,
                 strides,
                 multi_blocks,
                 groups,
                 dropout_rate,
                 block_names,
                 merge_type,
                 training,
                 data_format,
                 name="channet_unit"):
    """
    ChannelNet unit.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels_list : tuple/list of 2 int
        Number of output channels for each sub-block.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    multi_blocks : int
        Number of DWS layers in the sequence.
    groups : int
        Number of groups.
    dropout_rate : float
        Dropout rate.
    block_names : tuple/list of 2 str
        Sub-block names.
    merge_type : str
        Type of sub-block output merging ('seq', 'add' or 'cat').
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'channet_unit'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (len(block_names) == 2)
    assert (merge_type in ["seq", "add", "cat"])
    # Outputs of each sub-block are collected so they can be merged below.
    x_outs = []
    for i, (out_channels, block_name) in enumerate(zip(out_channels_list, block_names)):
        # Only the first sub-block may down-sample; later ones keep stride 1.
        strides_i = (strides if i == 0 else 1)
        name_i = name + '/block{}'.format(i + 1)
        # Requires channels_first layout (channel axis is dimension 1).
        assert (x.shape[1].value == in_channels)
        # Dispatch on the configured sub-block type; the group-block variants
        # preserve the channel count, the conv variants may change it.
        if block_name == "channet_conv3x3":
            x = channet_conv3x3(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides_i,
                dropout_rate=dropout_rate,
                activate=False,
                training=training,
                data_format=data_format,
                name=name_i)
        elif block_name == "channet_dws_conv_block":
            x = channet_dws_conv_block(
                x=x,
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides_i,
                dropout_rate=dropout_rate,
                training=training,
                data_format=data_format,
                name=name_i)
        elif block_name == "simple_group_block":
            x = simple_group_block(
                x=x,
                channels=in_channels,
                multi_blocks=multi_blocks,
                groups=groups,
                dropout_rate=dropout_rate,
                training=training,
                data_format=data_format,
                name=name_i)
        elif block_name == "conv_group_block":
            x = conv_group_block(
                x=x,
                channels=in_channels,
                multi_blocks=multi_blocks,
                groups=groups,
                dropout_rate=dropout_rate,
                training=training,
                data_format=data_format,
                name=name_i)
        else:
            raise NotImplementedError()
        x_outs = x_outs + [x]
        in_channels = out_channels
    # Merge strategy: 'seq' keeps only the last output, 'add' sums the two
    # sub-block outputs, 'cat' concatenates them along the channel axis.
    if merge_type == "seq":
        x = x_outs[-1]
    elif merge_type == "add":
        x = tf.add(*x_outs, name=name + '/add')
    elif merge_type == "cat":
        x = tf.concat(x_outs, axis=get_channel_axis(data_format), name=name + '/cat')
    else:
        raise NotImplementedError()
    return x
class ChannelNet(object):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    channels : list of list of list of int
        Number of output channels for each unit.
    block_names : list of list of list of str
        Names of blocks for each unit.
    merge_types : list of list of str
        Merge types for each unit.
    dropout_rate : float, default 0.0001
        Dropout rate.
    multi_blocks : int, default 2
        Block count architectural parameter.
    groups : int, default 2
        Group count architectural parameter.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 block_names,
                 merge_types,
                 dropout_rate=0.0001,
                 multi_blocks=2,
                 groups=2,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ChannelNet, self).__init__(**kwargs)
        # assert (data_format in ["channels_last", "channels_first"])
        # Only channels_first is supported: several blocks index x.shape[1]
        # as the channel dimension.
        assert (data_format in ["channels_first"])
        self.channels = channels
        self.block_names = block_names
        self.merge_types = merge_types
        self.dropout_rate = dropout_rate
        self.multi_blocks = multi_blocks
        self.groups = groups
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage down-samples spatially.
                strides = 2 if (j == 0) else 1
                x = channet_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels_list=out_channels,
                    strides=strides,
                    multi_blocks=self.multi_blocks,
                    groups=self.groups,
                    dropout_rate=self.dropout_rate,
                    block_names=self.block_names[i][j],
                    merge_type=self.merge_types[i][j],
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                # "cat" merging doubles the channel count; otherwise the last
                # sub-block's width becomes the next unit's input width.
                if self.merge_types[i][j] == "cat":
                    in_channels = sum(out_channels)
                else:
                    in_channels = out_channels[-1]
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_channelnet(model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create ChannelNet model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Fixed ChannelNet architecture: per-unit widths, sub-block types and
    # merge strategies from the paper.
    channels = [[[32, 64]], [[128, 128]], [[256, 256]], [[512, 512], [512, 512]], [[1024, 1024]]]
    block_names = [[["channet_conv3x3", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]],
                   [["channet_dws_conv_block", "simple_group_block"], ["conv_group_block", "conv_group_block"]],
                   [["channet_dws_conv_block", "channet_dws_conv_block"]]]
    merge_types = [["cat"], ["cat"], ["cat"], ["add", "add"], ["seq"]]
    net = ChannelNet(
        channels=channels,
        block_names=block_names,
        merge_types=merge_types,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        # Only the state dict is fetched here; the caller applies it to the graph.
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def channelnet(**kwargs):
    """
    ChannelNet model from 'ChannelNets: Compact and Efficient Convolutional Neural Networks via Channel-Wise
    Convolutions,' https://arxiv.org/abs/1809.01330.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_channelnet(
        model_name="channelnet",
        **kwargs)
def _test():
    """
    Self-test: build the ChannelNet model, check the trainable-parameter count
    against the known reference value, and run a dummy forward pass (TF1 graph
    mode with a placeholder and an explicit Session).
    """
    import numpy as np
    data_format = "channels_first"
    pretrained = False
    models = [
        channelnet,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1-style symbolic input; NCHW or NHWC depending on data_format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameter count for the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != channelnet or weight_count == 3875112)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Dummy zero image batch just to verify the output shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Each model gets a clean graph so variable counts do not accumulate.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 24,927 | 30.16 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/squeezenext.py | """
SqueezeNext for ImageNet-1K, implemented in TensorFlow.
Original paper: 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.
"""
__all__ = ['SqueezeNext', 'sqnxt23_w1', 'sqnxt23_w3d2', 'sqnxt23_w2', 'sqnxt23v5_w1', 'sqnxt23v5_w3d2', 'sqnxt23v5_w2']
import os
import tensorflow as tf
from .common import maxpool2d, conv_block, conv1x1_block, conv7x7_block, is_channels_first, flatten
def sqnxt_unit(x,
               in_channels,
               out_channels,
               strides,
               training,
               data_format,
               name="sqnxt_unit"):
    """
    SqueezeNext unit.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'sqnxt_unit'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if strides == 2:
        # Down-sampling unit: no bottleneck reduction, identity must be projected.
        reduction_den = 1
        resize_identity = True
    elif in_channels > out_channels:
        # Channel-shrinking unit: reduce by 4 in the bottleneck, project identity.
        reduction_den = 4
        resize_identity = True
    else:
        # Regular unit: reduce by 2, identity passes through unchanged.
        reduction_den = 2
        resize_identity = False
    if resize_identity:
        identity = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            use_bias=True,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    else:
        identity = x
    # Bottleneck entry: 1x1 reduction (also applies the spatial stride).
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=(in_channels // reduction_den),
        strides=strides,
        use_bias=True,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = conv1x1_block(
        x=x,
        in_channels=(in_channels // reduction_den),
        out_channels=(in_channels // (2 * reduction_den)),
        use_bias=True,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # The 3x3 convolution is factorized into 1x3 and 3x1 (separable) stages.
    x = conv_block(
        x=x,
        in_channels=(in_channels // (2 * reduction_den)),
        out_channels=(in_channels // reduction_den),
        kernel_size=(1, 3),
        strides=1,
        padding=(0, 1),
        use_bias=True,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    x = conv_block(
        x=x,
        in_channels=(in_channels // reduction_den),
        out_channels=(in_channels // reduction_den),
        kernel_size=(3, 1),
        strides=1,
        padding=(1, 0),
        use_bias=True,
        training=training,
        data_format=data_format,
        name=name + "/conv4")
    # Bottleneck exit: 1x1 expansion back to the unit's output width.
    x = conv1x1_block(
        x=x,
        in_channels=(in_channels // reduction_den),
        out_channels=out_channels,
        use_bias=True,
        training=training,
        data_format=data_format,
        name=name + "/conv5")
    # Residual connection followed by the final activation.
    x = x + identity
    x = tf.nn.relu(x, name=name + "/final_activ")
    return x
def sqnxt_init_block(x,
                     in_channels,
                     out_channels,
                     training,
                     data_format,
                     name="sqnxt_init_block"):
    """
    SqueezeNext specific initial block: strided 7x7 convolution followed by max pooling.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'sqnxt_init_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = conv7x7_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        padding=1,
        use_bias=True,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        ceil_mode=True,
        data_format=data_format,
        name=name + "/pool")
    return x
class SqueezeNext(object):
    """
    SqueezeNext model from 'SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeNext, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = sqnxt_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # The first unit of every stage except the first down-samples.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = sqnxt_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Final 1x1 expansion before global pooling.
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            use_bias=True,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_squeezenext(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create SqueezeNext model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SqueezeNext ('23' or '23v5').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.

    Raises:
    ------
    ValueError
        If `version` is unknown, or if `pretrained` is requested without a
        valid `model_name`.
    """
    init_block_channels = 64
    final_block_channels = 128
    channels_per_layers = [32, 64, 128, 256]

    # The two architecture versions differ only in per-stage unit counts.
    if version == '23':
        layers = [6, 6, 8, 1]
    elif version == '23v5':
        layers = [2, 4, 14, 1]
    else:
        # Fixed error message: this is SqueezeNext, not SqueezeNet.
        raise ValueError("Unsupported SqueezeNext version {}".format(version))

    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    if width_scale != 1:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        final_block_channels = int(final_block_channels * width_scale)

    net = SqueezeNext(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        # Only the state dict is fetched here; the caller applies it to the graph.
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def sqnxt23_w1(**kwargs):
    """
    1.0-SqNxt-23 model ('SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(version="23", width_scale=1.0, model_name="sqnxt23_w1")
    return get_squeezenext(**config, **kwargs)
def sqnxt23_w3d2(**kwargs):
    """
    1.5-SqNxt-23 model ('SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(version="23", width_scale=1.5, model_name="sqnxt23_w3d2")
    return get_squeezenext(**config, **kwargs)
def sqnxt23_w2(**kwargs):
    """
    2.0-SqNxt-23 model ('SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(version="23", width_scale=2.0, model_name="sqnxt23_w2")
    return get_squeezenext(**config, **kwargs)
def sqnxt23v5_w1(**kwargs):
    """
    1.0-SqNxt-23v5 model ('SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(version="23v5", width_scale=1.0, model_name="sqnxt23v5_w1")
    return get_squeezenext(**config, **kwargs)
def sqnxt23v5_w3d2(**kwargs):
    """
    1.5-SqNxt-23v5 model ('SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(version="23v5", width_scale=1.5, model_name="sqnxt23v5_w3d2")
    return get_squeezenext(**config, **kwargs)
def sqnxt23v5_w2(**kwargs):
    """
    2.0-SqNxt-23v5 model ('SqueezeNext: Hardware-Aware Neural Network Design,' https://arxiv.org/abs/1803.10615).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(version="23v5", width_scale=2.0, model_name="sqnxt23v5_w2")
    return get_squeezenext(**config, **kwargs)
def _test():
    """Smoke test: build every SqueezeNext variant, verify its trainable-parameter
    count, and run a single dummy forward pass in a TF1 graph-mode session."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        sqnxt23_w1,
        sqnxt23_w3d2,
        sqnxt23_w2,
        sqnxt23v5_w1,
        sqnxt23v5_w3d2,
        sqnxt23v5_w2,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1 graph-mode API: a placeholder feeds the graph built by net(x).
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameter count across all variables of this graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Each assert pins the expected parameter count for one variant.
        assert (model != sqnxt23_w1 or weight_count == 724056)
        assert (model != sqnxt23_w3d2 or weight_count == 1511824)
        assert (model != sqnxt23_w2 or weight_count == 2583752)
        assert (model != sqnxt23v5_w1 or weight_count == 921816)
        assert (model != sqnxt23v5_w3d2 or weight_count == 1953616)
        assert (model != sqnxt23v5_w2 or weight_count == 3366344)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Dummy zero input; only the output shape is checked.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the graph so variable counts do not accumulate across models.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 15,382 | 29.704591 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/resnet.py | """
ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
"""
__all__ = ['ResNet', 'resnet10', 'resnet12', 'resnet14', 'resnetbc14b', 'resnet16', 'resnet18_wd4', 'resnet18_wd2',
'resnet18_w3d4', 'resnet18', 'resnet26', 'resnetbc26b', 'resnet34', 'resnetbc38b', 'resnet50', 'resnet50b',
'resnet101', 'resnet101b', 'resnet152', 'resnet152b', 'resnet200', 'resnet200b', 'res_block',
'res_bottleneck_block', 'res_unit', 'res_init_block']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, conv7x7_block, maxpool2d, is_channels_first, flatten
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
def res_block(x,
              in_channels,
              out_channels,
              strides,
              training,
              data_format,
              name="res_block"):
    """
    Simple ResNet block for residual path in ResNet unit: two 3x3 convolutions,
    the second one without activation (the unit applies ReLU after the residual add).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'res_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    x = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # Fixed: the second conv consumes the first conv's output, so its input
    # width is `out_channels` (was incorrectly passed as `in_channels`).
    x = conv3x3_block(
        x=x,
        in_channels=out_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return x
def res_bottleneck_block(x,
                         in_channels,
                         out_channels,
                         strides,
                         conv1_stride=False,
                         bottleneck_factor=4,
                         training=False,
                         data_format="channels_last",
                         name="res_bottleneck_block"):
    """
    ResNet bottleneck block for residual path in ResNet unit: 1x1 reduce,
    3x3, 1x1 expand (no activation on the last conv; ReLU follows the
    residual add in the unit).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    conv1_stride : bool, default False
        Whether to use stride in the first or the second convolution layer of the block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'res_bottleneck_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // bottleneck_factor
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=(strides if conv1_stride else 1),
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    # Fixed: conv2/conv3 consume the preceding conv's output, whose width is
    # `mid_channels` (both were incorrectly passed `in_channels`).
    x = conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=mid_channels,
        strides=(1 if conv1_stride else strides),
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return x
def res_unit(x,
             in_channels,
             out_channels,
             strides,
             bottleneck,
             conv1_stride,
             training,
             data_format,
             name="res_unit"):
    """
    ResNet unit: a residual body plus an identity shortcut, followed by ReLU.
    The shortcut is projected with a 1x1 convolution whenever the channel
    count or spatial resolution changes.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'res_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    needs_projection = (in_channels != out_channels) or (strides != 1)
    if needs_projection:
        # Projection shortcut: match channels/resolution with a 1x1 conv (no activation).
        shortcut = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    else:
        shortcut = x
    if bottleneck:
        x = res_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            training=training,
            data_format=data_format,
            name=name + "/body")
    else:
        x = res_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            training=training,
            data_format=data_format,
            name=name + "/body")
    x = x + shortcut
    x = tf.nn.relu(x, name=name + "/activ")
    return x
def res_init_block(x,
                   in_channels,
                   out_channels,
                   training,
                   data_format,
                   name="res_init_block"):
    """
    ResNet specific initial block: 7x7 stride-2 convolution followed by a
    3x3 stride-2 max-pooling (overall 4x spatial reduction).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'res_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Added the default value for `name` that the docstring already promised,
    # consistent with the other blocks in this file.
    x = conv7x7_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    x = maxpool2d(
        x=x,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return x
class ResNet(object):
    """
    ResNet model from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        # Only hyper-parameters are stored here; the graph is built lazily in __call__.
        super(ResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        # NOTE(review): layer `name` strings feed variable scopes that must match the
        # pretrained state dict — do not rename them.
        in_channels = self.in_channels
        x = res_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first stage.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = res_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Fixed 7x7 pooling — assumes the default 224x224 input (7x7 final
        # feature map); confirm before using other in_size values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_resnet(blocks,
               bottleneck=None,
               conv1_stride=True,
               width_scale=1.0,
               model_name=None,
               pretrained=False,
               root=os.path.join("~", ".tensorflow", "models"),
               **kwargs):
    """
    Create ResNet or SE-ResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Bottleneck units are the default for the deeper (>= 50 blocks) variants.
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Translate the nominal depth into the number of units per stage.
    if blocks == 10:
        units = [1, 1, 1, 1]
    elif blocks == 12:
        units = [2, 1, 1, 1]
    elif (blocks == 14) and (not bottleneck):
        units = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        units = [1, 1, 1, 1]
    elif blocks == 16:
        units = [2, 2, 2, 1]
    elif blocks == 18:
        units = [2, 2, 2, 2]
    elif (blocks == 26) and (not bottleneck):
        units = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        units = [2, 2, 2, 2]
    elif blocks == 34:
        units = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        units = [3, 3, 3, 3]
    elif blocks == 50:
        units = [3, 4, 6, 3]
    elif blocks == 101:
        units = [3, 4, 23, 3]
    elif blocks == 152:
        units = [3, 8, 36, 3]
    elif blocks == 200:
        units = [3, 24, 36, 3]
    else:
        raise ValueError("Unsupported ResNet with number of blocks: {}".format(blocks))
    # Sanity check: units * convs-per-unit + stem conv + classifier == nominal depth.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(units) * convs_per_unit + 2 == blocks)
    init_block_channels = 64
    base_channels = [64, 128, 256, 512]
    if bottleneck:
        base_channels = [ci * 4 for ci in base_channels]
    channels = [[ci] * li for (ci, li) in zip(base_channels, units)]
    if width_scale != 1.0:
        last_i = len(channels) - 1
        scaled = []
        for i, ci in enumerate(channels):
            last_j = len(ci) - 1
            # The final unit of the final stage keeps its original width.
            scaled.append([cij if ((i == last_i) and (j == last_j)) else int(cij * width_scale)
                           for j, cij in enumerate(ci)])
        channels = scaled
        init_block_channels = int(init_block_channels * width_scale)
    net = ResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def resnet10(**kwargs):
    """
    ResNet-10 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    An experimental depth not from the original paper.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=10, model_name="resnet10")
    return get_resnet(**config, **kwargs)
def resnet12(**kwargs):
    """
    ResNet-12 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    An experimental depth not from the original paper.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=12, model_name="resnet12")
    return get_resnet(**config, **kwargs)
def resnet14(**kwargs):
    """
    ResNet-14 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    An experimental depth not from the original paper.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=14, model_name="resnet14")
    return get_resnet(**config, **kwargs)
def resnetbc14b(**kwargs):
    """
    ResNet-BC-14b model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Experimental bottleneck-compressed variant with stride in the second conv.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=14, bottleneck=True, conv1_stride=False, model_name="resnetbc14b")
    return get_resnet(**config, **kwargs)
def resnet16(**kwargs):
    """
    ResNet-16 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    An experimental depth not from the original paper.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=16, model_name="resnet16")
    return get_resnet(**config, **kwargs)
def resnet18_wd4(**kwargs):
    """
    ResNet-18 model at 0.25 width scale ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). An experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=18, width_scale=0.25, model_name="resnet18_wd4")
    return get_resnet(**config, **kwargs)
def resnet18_wd2(**kwargs):
    """
    ResNet-18 model at 0.5 width scale ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). An experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=18, width_scale=0.5, model_name="resnet18_wd2")
    return get_resnet(**config, **kwargs)
def resnet18_w3d4(**kwargs):
    """
    ResNet-18 model at 0.75 width scale ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). An experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=18, width_scale=0.75, model_name="resnet18_w3d4")
    return get_resnet(**config, **kwargs)
def resnet18(**kwargs):
    """
    ResNet-18 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=18, model_name="resnet18")
    return get_resnet(**config, **kwargs)
def resnet26(**kwargs):
    """
    ResNet-26 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    An experimental depth using simple (non-bottleneck) units.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=26, bottleneck=False, model_name="resnet26")
    return get_resnet(**config, **kwargs)
def resnetbc26b(**kwargs):
    """
    ResNet-BC-26b model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Experimental bottleneck-compressed variant with stride in the second conv.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=26, bottleneck=True, conv1_stride=False, model_name="resnetbc26b")
    return get_resnet(**config, **kwargs)
def resnet34(**kwargs):
    """
    ResNet-34 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=34, model_name="resnet34")
    return get_resnet(**config, **kwargs)
def resnetbc38b(**kwargs):
    """
    ResNet-BC-38b model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Experimental bottleneck-compressed variant with stride in the second conv.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=38, bottleneck=True, conv1_stride=False, model_name="resnetbc38b")
    return get_resnet(**config, **kwargs)
def resnet50(**kwargs):
    """
    ResNet-50 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=50, model_name="resnet50")
    return get_resnet(**config, **kwargs)
def resnet50b(**kwargs):
    """
    ResNet-50 variant with the stride moved to the second convolution of the
    bottleneck block ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=50, conv1_stride=False, model_name="resnet50b")
    return get_resnet(**config, **kwargs)
def resnet101(**kwargs):
    """
    ResNet-101 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=101, model_name="resnet101")
    return get_resnet(**config, **kwargs)
def resnet101b(**kwargs):
    """
    ResNet-101 variant with the stride moved to the second convolution of the
    bottleneck block ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=101, conv1_stride=False, model_name="resnet101b")
    return get_resnet(**config, **kwargs)
def resnet152(**kwargs):
    """
    ResNet-152 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=152, model_name="resnet152")
    return get_resnet(**config, **kwargs)
def resnet152b(**kwargs):
    """
    ResNet-152 variant with the stride moved to the second convolution of the
    bottleneck block ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385).
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=152, conv1_stride=False, model_name="resnet152b")
    return get_resnet(**config, **kwargs)
def resnet200(**kwargs):
    """
    ResNet-200 model ('Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385).
    An experimental depth.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=200, model_name="resnet200")
    return get_resnet(**config, **kwargs)
def resnet200b(**kwargs):
    """
    ResNet-200 variant with the stride moved to the second convolution of the
    bottleneck block ('Deep Residual Learning for Image Recognition,'
    https://arxiv.org/abs/1512.03385). An experimental depth.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are stored.
    Returns:
    -------
    functor
        Functor that builds the model graph, with extra fields attached.
    """
    config = dict(blocks=200, conv1_stride=False, model_name="resnet200b")
    return get_resnet(**config, **kwargs)
def _test():
    """Smoke test: build every ResNet variant, verify its trainable-parameter
    count, and run a single dummy forward pass in a TF1 graph-mode session."""
    import numpy as np
    # import logging
    # logging.getLogger("tensorflow").disabled = True
    data_format = "channels_last"
    pretrained = False
    models = [
        resnet10,
        resnet12,
        resnet14,
        resnetbc14b,
        resnet16,
        resnet18_wd4,
        resnet18_wd2,
        resnet18_w3d4,
        resnet18,
        resnet26,
        resnetbc26b,
        resnet34,
        resnetbc38b,
        resnet50,
        resnet50b,
        resnet101,
        resnet101b,
        resnet152,
        resnet152b,
        resnet200,
        resnet200b,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # TF1 graph-mode API: a placeholder feeds the graph built by net(x).
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total trainable parameter count across all variables of this graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        # Each assert pins the expected parameter count for one variant.
        assert (model != resnet10 or weight_count == 5418792)
        assert (model != resnet12 or weight_count == 5492776)
        assert (model != resnet14 or weight_count == 5788200)
        assert (model != resnetbc14b or weight_count == 10064936)
        assert (model != resnet16 or weight_count == 6968872)
        assert (model != resnet18_wd4 or weight_count == 3937400)
        assert (model != resnet18_wd2 or weight_count == 5804296)
        assert (model != resnet18_w3d4 or weight_count == 8476056)
        assert (model != resnet18 or weight_count == 11689512)
        assert (model != resnet26 or weight_count == 17960232)
        assert (model != resnetbc26b or weight_count == 15995176)
        assert (model != resnet34 or weight_count == 21797672)
        assert (model != resnetbc38b or weight_count == 21925416)
        assert (model != resnet50 or weight_count == 25557032)
        assert (model != resnet50b or weight_count == 25557032)
        assert (model != resnet101 or weight_count == 44549160)
        assert (model != resnet101b or weight_count == 44549160)
        assert (model != resnet152 or weight_count == 60192808)
        assert (model != resnet152b or weight_count == 60192808)
        assert (model != resnet200 or weight_count == 64673832)
        assert (model != resnet200b or weight_count == 64673832)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # Dummy zero input; only the output shape is checked.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the graph so variable counts do not accumulate across models.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 29,772 | 29.85285 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mobilenetv2.py | """
MobileNetV2 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
"""
__all__ = ['MobileNetV2', 'mobilenetv2_w1', 'mobilenetv2_w3d4', 'mobilenetv2_wd2', 'mobilenetv2_wd4']
import os
import tensorflow as tf
from .common import conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, is_channels_first, flatten
def linear_bottleneck(x,
                      in_channels,
                      out_channels,
                      strides,
                      expansion,
                      training,
                      data_format,
                      name="linear_bottleneck"):
    """
    So-called 'Linear Bottleneck' layer, the MobileNetV2 unit:
    1x1 expand (ReLU6) -> 3x3 depthwise (ReLU6) -> 1x1 project (linear),
    with an identity shortcut when shapes allow it.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    expansion : bool
        Whether do expansion of channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'linear_bottleneck'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # A residual shortcut is only possible when shape and channels are preserved.
    use_shortcut = (strides == 1) and (in_channels == out_channels)
    exp_channels = in_channels * 6 if expansion else in_channels
    shortcut = x if use_shortcut else None
    x = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=exp_channels,
        activation="relu6",
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = dwconv3x3_block(
        x=x,
        in_channels=exp_channels,
        out_channels=exp_channels,
        strides=strides,
        activation="relu6",
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # Projection conv is linear (no activation) — the 'linear bottleneck'.
    x = conv1x1_block(
        x=x,
        in_channels=exp_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    if use_shortcut:
        x = x + shortcut
    return x
class MobileNetV2(object):
    """
    MobileNetV2 model from 'MobileNetV2: Inverted Residuals and Linear Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        # Only hyper-parameters are stored here; the graph is built lazily in __call__.
        super(MobileNetV2, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        # NOTE(review): layer `name` strings feed variable scopes that must match the
        # pretrained state dict — do not rename them.
        in_channels = self.in_channels
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            strides=2,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first stage.
                strides = 2 if (j == 0) and (i != 0) else 1
                # The very first unit of the network skips channel expansion.
                expansion = (i != 0) or (j != 0)
                x = linear_bottleneck(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    expansion=expansion,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            activation="relu6",
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        in_channels = self.final_block_channels
        # Fixed 7x7 pooling — assumes the default 224x224 input (7x7 final
        # feature map); confirm before using other in_size values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # A 1x1 convolution acts as the classifier; flattening then yields (batch, classes).
        x = conv1x1(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            use_bias=False,
            data_format=self.data_format,
            name="output")
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_mobilenetv2(width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create MobileNetV2 model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 2, 3, 4, 3, 3, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    # Group per-layer channel counts into stages: a downsampling entry opens a new
    # stage, otherwise its layers are appended to the current stage.
    channels = [[]]
    for ch, depth, ds in zip(channels_per_layers, layers, downsample):
        stage_part = [ch] * depth
        if ds != 0:
            channels.append(stage_part)
        else:
            channels[-1] += stage_part
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
        # The final feature block is only widened, never narrowed, by the scale factor.
        if width_scale > 1.0:
            final_block_channels = int(final_block_channels * width_scale)
    net = MobileNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def mobilenetv2_w1(**kwargs):
    """
    MobileNetV2-224 with width multiplier 1.0, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_w1", width_scale=1.0, **kwargs)
def mobilenetv2_w3d4(**kwargs):
    """
    MobileNetV2-224 with width multiplier 0.75, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_w3d4", width_scale=0.75, **kwargs)
def mobilenetv2_wd2(**kwargs):
    """
    MobileNetV2-224 with width multiplier 0.5, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_wd2", width_scale=0.5, **kwargs)
def mobilenetv2_wd4(**kwargs):
    """
    MobileNetV2-224 with width multiplier 0.25, from 'MobileNetV2: Inverted Residuals and Linear
    Bottlenecks,' https://arxiv.org/abs/1801.04381.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenetv2(model_name="mobilenetv2_wd4", width_scale=0.25, **kwargs)
def _test():
    """
    Smoke-test all MobileNetV2 variants: check the trainable-parameter count and run one
    forward pass on a zero image (TF1 graph-mode API).
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        mobilenetv2_w1,
        mobilenetv2_w3d4,
        mobilenetv2_wd2,
        mobilenetv2_wd4,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Expected parameter counts pin each architecture against regressions.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != mobilenetv2_w1 or weight_count == 3504960)
        assert (model != mobilenetv2_w3d4 or weight_count == 2627592)
        assert (model != mobilenetv2_wd2 or weight_count == 1964736)
        assert (model != mobilenetv2_wd4 or weight_count == 1516392)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts from a clean slate.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 12,232 | 30.939948 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/squeezenet.py | """
SqueezeNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
https://arxiv.org/abs/1602.07360.
"""
__all__ = ['SqueezeNet', 'squeezenet_v1_0', 'squeezenet_v1_1', 'squeezeresnet_v1_0', 'squeezeresnet_v1_1']
import os
import tensorflow as tf
from .common import conv2d, maxpool2d, is_channels_first, get_channel_axis, flatten
def fire_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              padding,
              data_format,
              name="fire_conv"):
    """
    Convolution + ReLU building block used inside SqueezeNet 'Fire' units.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'fire_conv'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    conv_out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        padding=padding,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv")
    return tf.nn.relu(conv_out, name=name + "/activ")
def fire_unit(x,
              in_channels,
              squeeze_channels,
              expand1x1_channels,
              expand3x3_channels,
              residual,
              data_format,
              name="fire_unit"):
    """
    SqueezeNet 'Fire' unit: a 1x1 squeeze convolution feeding two parallel expand branches
    (1x1 and 3x3) whose outputs are concatenated along the channel axis, with an optional
    identity shortcut.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    squeeze_channels : int
        Number of output channels for squeeze convolution blocks.
    expand1x1_channels : int
        Number of output channels for expand 1x1 convolution blocks.
    expand3x3_channels : int
        Number of output channels for expand 3x3 convolution blocks.
    residual : bool
        Whether use residual connection.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'fire_unit'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    identity = x if residual else None
    squeezed = fire_conv(
        x=x,
        in_channels=in_channels,
        out_channels=squeeze_channels,
        kernel_size=1,
        padding=0,
        data_format=data_format,
        name=name + "/squeeze")
    branch1x1 = fire_conv(
        x=squeezed,
        in_channels=squeeze_channels,
        out_channels=expand1x1_channels,
        kernel_size=1,
        padding=0,
        data_format=data_format,
        name=name + "/expand1x1")
    branch3x3 = fire_conv(
        x=squeezed,
        in_channels=squeeze_channels,
        out_channels=expand3x3_channels,
        kernel_size=3,
        padding=1,
        data_format=data_format,
        name=name + "/expand3x3")
    out = tf.concat([branch1x1, branch3x3], axis=get_channel_axis(data_format), name=name + "/concat")
    if identity is not None:
        out = out + identity
    return out
def squeeze_init_block(x,
                       in_channels,
                       out_channels,
                       kernel_size,
                       data_format,
                       name="squeeze_init_block"):
    """
    SqueezeNet specific initial block: a stride-2 convolution followed by ReLU.
    (Docstring previously said 'ResNet' -- a copy-paste slip.)
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'squeeze_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    conv_out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=2,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv")
    return tf.nn.relu(conv_out, name=name + "/activ")
class SqueezeNet(object):
    """
    SqueezeNet model from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size,'
    https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    residuals : bool
        Whether to use residual units.
    init_block_kernel_size : int or tuple/list of 2 int
        The dimensions of the convolution window for the initial unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 residuals,
                 init_block_kernel_size,
                 init_block_channels,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SqueezeNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.residuals = residuals
        self.init_block_kernel_size = init_block_kernel_size
        self.init_block_channels = init_block_channels
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = squeeze_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            kernel_size=self.init_block_kernel_size,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        # Each stage is opened by a ceil-mode max-pool and then stacks Fire units.
        for i, channels_per_stage in enumerate(self.channels):
            x = maxpool2d(
                x=x,
                pool_size=3,
                strides=2,
                ceil_mode=True,
                data_format=self.data_format,
                name="features/pool{}".format(i + 1))
            for j, out_channels in enumerate(channels_per_stage):
                # Fire widths derive from the unit's output width: each expand
                # branch gets C/2 (they concatenate back to C), squeeze gets C/8.
                expand_channels = out_channels // 2
                squeeze_channels = out_channels // 8
                x = fire_unit(
                    x=x,
                    in_channels=in_channels,
                    squeeze_channels=squeeze_channels,
                    expand1x1_channels=expand_channels,
                    expand3x3_channels=expand_channels,
                    residual=((self.residuals is not None) and (self.residuals[i][j] == 1)),
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Regularize just before the convolutional classifier.
        x = tf.keras.layers.Dropout(
            rate=0.5,
            name="features/dropout")(
            inputs=x,
            training=training)
        # The classifier is a 1x1 convolution + ReLU + global average pooling (no dense layer).
        x = conv2d(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            kernel_size=1,
            data_format=self.data_format,
            name="output/final_conv")
        x = tf.nn.relu(x, name="output/final_activ")
        # NOTE(review): pool_size=13 assumes the 13x13 map produced by a 224x224 input --
        # confirm before feeding other spatial sizes.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=13,
            strides=1,
            data_format=self.data_format,
            name="output/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_squeezenet(version,
                   residual=False,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create SqueezeNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of SqueezeNet ('1.0' or '1.1').
    residual : bool, default False
        Whether to use residual connections.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-version architecture table:
    # (stage channels, residual flags, init-block kernel size, init-block channels).
    configs = {
        "1.0": ([[128, 128, 256], [256, 384, 384, 512], [512]],
                [[0, 1, 0], [1, 0, 1, 0], [1]],
                7,
                96),
        "1.1": ([[128, 128], [256, 256], [384, 384, 512, 512]],
                [[0, 1], [0, 1], [0, 1, 0, 1]],
                3,
                64),
    }
    if version not in configs:
        raise ValueError("Unsupported SqueezeNet version {}".format(version))
    channels, residuals, init_block_kernel_size, init_block_channels = configs[version]
    if not residual:
        residuals = None
    net = SqueezeNet(
        channels=channels,
        residuals=residuals,
        init_block_kernel_size=init_block_kernel_size,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def squeezenet_v1_0(**kwargs):
    """
    Original ('vanilla') SqueezeNet, from 'SqueezeNet: AlexNet-level accuracy with 50x fewer
    parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(residual=False, version="1.0", model_name="squeezenet_v1_0", **kwargs)
def squeezenet_v1_1(**kwargs):
    """
    SqueezeNet v1.1, from 'SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and
    <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(residual=False, version="1.1", model_name="squeezenet_v1_1", **kwargs)
def squeezeresnet_v1_0(**kwargs):
    """
    SqueezeNet v1.0 with residual connections, from 'SqueezeNet: AlexNet-level accuracy with 50x
    fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(residual=True, version="1.0", model_name="squeezeresnet_v1_0", **kwargs)
def squeezeresnet_v1_1(**kwargs):
    """
    SqueezeNet v1.1 with residual connections, from 'SqueezeNet: AlexNet-level accuracy with 50x
    fewer parameters and <0.5MB model size,' https://arxiv.org/abs/1602.07360.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_squeezenet(residual=True, version="1.1", model_name="squeezeresnet_v1_1", **kwargs)
def _test():
    """
    Smoke-test all SqueezeNet variants: check the trainable-parameter count and run one
    forward pass on a zero image (TF1 graph-mode API).
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        squeezenet_v1_0,
        squeezenet_v1_1,
        squeezeresnet_v1_0,
        squeezeresnet_v1_1,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Residual variants add only identity shortcuts, hence identical parameter counts.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != squeezenet_v1_0 or weight_count == 1248424)
        assert (model != squeezenet_v1_1 or weight_count == 1235496)
        assert (model != squeezeresnet_v1_0 or weight_count == 1248424)
        assert (model != squeezeresnet_v1_1 or weight_count == 1235496)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts from a clean slate.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 14,788 | 29.810417 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/vgg.py | """
VGG for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
https://arxiv.org/abs/1409.1556.
"""
__all__ = ['VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'bn_vgg11', 'bn_vgg13', 'bn_vgg16', 'bn_vgg19', 'bn_vgg11b',
'bn_vgg13b', 'bn_vgg16b', 'bn_vgg19b']
import os
import tensorflow as tf
from .common import conv3x3_block, maxpool2d, is_channels_first, flatten
def vgg_dense(x,
              in_channels,
              out_channels,
              training,
              name="vgg_dense"):
    """
    VGG classifier sub-block: fully-connected layer, ReLU, then 50% dropout.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'vgg_dense'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (in_channels > 0)
    out = tf.keras.layers.Dense(
        units=out_channels,
        name=name + "/fc")(x)
    out = tf.nn.relu(out, name=name + "/activ")
    dropout_layer = tf.keras.layers.Dropout(
        rate=0.5,
        name=name + "/dropout")
    return dropout_layer(inputs=out, training=training)
def vgg_output_block(x,
                     in_channels,
                     classes,
                     training,
                     name="vgg_output_block"):
    """
    VGG classifier head: two 4096-wide dense sub-blocks followed by the final class projection.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'vgg_output_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    hidden_units = 4096
    out = vgg_dense(
        x=x,
        in_channels=in_channels,
        out_channels=hidden_units,
        training=training,
        name=name + "/fc1")
    out = vgg_dense(
        x=out,
        in_channels=hidden_units,
        out_channels=hidden_units,
        training=training,
        name=name + "/fc2")
    return tf.keras.layers.Dense(
        units=classes,
        name=name + "/fc3")(out)
class VGG(object):
    """
    VGG models from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 use_bias=True,
                 use_bn=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.use_bias = use_bias
        self.use_bn = use_bn
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # Feature extractor: stacks of 3x3 conv blocks, each stage closed by a 2x2 max-pool.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                x = conv3x3_block(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    use_bias=self.use_bias,
                    use_bn=self.use_bn,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
            x = maxpool2d(
                x=x,
                pool_size=2,
                strides=2,
                padding=0,
                data_format=self.data_format,
                name="features/stage{}/pool".format(i + 1))
        # After five stride-2 pools a 224x224 input is 7x7 spatially, hence C * 7 * 7 flat features.
        in_channels = in_channels * 7 * 7
        # x = tf.reshape(x, [-1, in_channels])
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = vgg_output_block(
            x=x,
            in_channels=in_channels,
            classes=self.classes,
            training=training,
            name="output")
        return x
def get_vgg(blocks,
            use_bias=True,
            use_bn=False,
            model_name=None,
            pretrained=False,
            root=os.path.join("~", ".tensorflow", "models"),
            **kwargs):
    """
    Create VGG model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    use_bias : bool, default True
        Whether the convolution layer uses a bias vector.
    use_bn : bool, default False
        Whether to use BatchNorm layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Depth table: number of conv units in each of the five stages.
    blocks_to_layers = {
        11: [1, 1, 2, 2, 2],
        13: [2, 2, 2, 2, 2],
        16: [2, 2, 3, 3, 3],
        19: [2, 2, 4, 4, 4],
    }
    if blocks not in blocks_to_layers:
        raise ValueError("Unsupported VGG with number of blocks: {}".format(blocks))
    layers = blocks_to_layers[blocks]
    channels_per_layers = [64, 128, 256, 512, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = VGG(
        channels=channels,
        use_bias=use_bias,
        use_bn=use_bn,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def vgg11(**kwargs):
    """
    VGG-11 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=11, model_name="vgg11", **kwargs)
def vgg13(**kwargs):
    """
    VGG-13 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=13, model_name="vgg13", **kwargs)
def vgg16(**kwargs):
    """
    VGG-16 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=16, model_name="vgg16", **kwargs)
def vgg19(**kwargs):
    """
    VGG-19 model from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=19, model_name="vgg19", **kwargs)
def bn_vgg11(**kwargs):
    """
    VGG-11 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=11, use_bias=False, use_bn=True, model_name="bn_vgg11", **kwargs)
def bn_vgg13(**kwargs):
    """
    VGG-13 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=13, use_bias=False, use_bn=True, model_name="bn_vgg13", **kwargs)
def bn_vgg16(**kwargs):
    """
    VGG-16 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=16, use_bias=False, use_bn=True, model_name="bn_vgg16", **kwargs)
def bn_vgg19(**kwargs):
    """
    VGG-19 model with batch normalization from 'Very Deep Convolutional Networks for Large-Scale Image Recognition,'
    https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=19, use_bias=False, use_bn=True, model_name="bn_vgg19", **kwargs)
def bn_vgg11b(**kwargs):
    """
    VGG-11 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=11, use_bias=True, use_bn=True, model_name="bn_vgg11b", **kwargs)
def bn_vgg13b(**kwargs):
    """
    VGG-13 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=13, use_bias=True, use_bn=True, model_name="bn_vgg13b", **kwargs)
def bn_vgg16b(**kwargs):
    """
    VGG-16 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=16, use_bias=True, use_bn=True, model_name="bn_vgg16b", **kwargs)
def bn_vgg19b(**kwargs):
    """
    VGG-19 model with batch normalization and biases in convolution layers from 'Very Deep Convolutional Networks for
    Large-Scale Image Recognition,' https://arxiv.org/abs/1409.1556.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_vgg(blocks=19, use_bias=True, use_bn=True, model_name="bn_vgg19b", **kwargs)
def _test():
    """
    Smoke-test all VGG variants: check the trainable-parameter count and run one
    forward pass on a zero image (TF1 graph-mode API).
    """
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        vgg11,
        vgg13,
        vgg16,
        vgg19,
        bn_vgg11,
        bn_vgg13,
        bn_vgg16,
        bn_vgg19,
        bn_vgg11b,
        bn_vgg13b,
        bn_vgg16b,
        bn_vgg19b,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Expected parameter counts pin each architecture against regressions.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != vgg11 or weight_count == 132863336)
        assert (model != vgg13 or weight_count == 133047848)
        assert (model != vgg16 or weight_count == 138357544)
        assert (model != vgg19 or weight_count == 143667240)
        assert (model != bn_vgg11 or weight_count == 132866088)
        assert (model != bn_vgg13 or weight_count == 133050792)
        assert (model != bn_vgg16 or weight_count == 138361768)
        assert (model != bn_vgg19 or weight_count == 143672744)
        assert (model != bn_vgg11b or weight_count == 132868840)
        assert (model != bn_vgg13b or weight_count == 133053736)
        assert (model != bn_vgg16b or weight_count == 138365992)
        assert (model != bn_vgg19b or weight_count == 143678248)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Drop the default graph so the next model starts from a clean slate.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 15,566 | 30.576065 | 117 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mnasnet.py | """
MnasNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.
"""
__all__ = ['MnasNet', 'mnasnet_b1', 'mnasnet_a1', 'mnasnet_small']
import os
import tensorflow as tf
from .common import is_channels_first, flatten, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
se_block, round_channels
def dws_exp_se_res_unit(x,
                        in_channels,
                        out_channels,
                        strides=1,
                        use_kernel3=True,
                        exp_factor=1,
                        se_factor=0,
                        use_skip=True,
                        activation="relu",
                        training=False,
                        data_format="channels_last",
                        name="dws_exp_se_res_unit"):
    """
    Depthwise separable expanded residual unit with an optional SE-block. This is the basic MnasNet unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the depthwise convolution layer.
    use_kernel3 : bool, default True
        Whether to use a 3x3 (instead of a 5x5) depthwise kernel.
    exp_factor : int, default 1
        Channel expansion factor for the unit.
    se_factor : int, default 0
        SE reduction factor for the unit (0 disables the SE-block).
    use_skip : bool, default True
        Whether to use the skip connection.
    activation : str, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dws_exp_se_res_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (exp_factor >= 1)
    # The identity shortcut is only valid when the unit preserves both the spatial
    # resolution and the channel count.
    has_identity = use_skip and (strides == 1) and (in_channels == out_channels)
    exp_channels = exp_factor * in_channels
    if use_kernel3:
        dw_conv = dwconv3x3_block
    else:
        dw_conv = dwconv5x5_block
    identity = x if has_identity else None
    # 1. Optional pointwise expansion.
    if exp_factor > 1:
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=exp_channels,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/exp_conv")
    # 2. Depthwise (possibly strided) convolution.
    x = dw_conv(
        x=x,
        in_channels=exp_channels,
        out_channels=exp_channels,
        strides=strides,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name + "/dw_conv")
    # 3. Optional squeeze-and-excitation recalibration.
    if se_factor > 0:
        x = se_block(
            x=x,
            channels=exp_channels,
            reduction=(exp_factor * se_factor),
            approx_sigmoid=False,
            round_mid=False,
            activation=activation,
            data_format=data_format,
            name=name + "/se")
    # 4. Linear pointwise projection (no activation).
    x = conv1x1_block(
        x=x,
        in_channels=exp_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/pw_conv")
    if has_identity:
        x = x + identity
    return x
def mnas_init_block(x,
                    in_channels,
                    out_channels,
                    mid_channels,
                    use_skip,
                    training,
                    data_format,
                    name="mnas_init_block"):
    """
    MnasNet specific initial block: a strided 3x3 convolution followed by a depthwise separable unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels (output of the first convolution).
    use_skip : bool
        Whether to use skip connection in the second block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mnas_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Strided stem convolution halves the spatial resolution.
    y = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = dws_exp_se_res_unit(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        use_skip=use_skip,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return y
def mnas_final_block(x,
                     in_channels,
                     out_channels,
                     mid_channels,
                     use_skip,
                     training,
                     data_format,
                     name="mnas_final_block"):
    """
    MnasNet specific final block of the feature extractor: an expanded depthwise separable unit
    followed by a 1x1 convolution.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels (output of the first sub-block).
    use_skip : bool
        Whether to use skip connection in the first sub-block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mnas_final_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    y = dws_exp_se_res_unit(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        exp_factor=6,
        use_skip=use_skip,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv1x1_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return y
class MnasNet(object):
    """
    MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : list of 2 int
        Number of output channels for the initial unit.
    final_block_channels : list of 2 int
        Number of output channels for the final block of the feature extractor.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    exp_factors : list of list of int
        Expansion factor for each unit.
    se_factors : list of list of int
        SE reduction factor for each unit.
    init_block_use_skip : bool
        Whether to use skip connection in the initial unit.
    final_block_use_skip : bool
        Whether to use skip connection in the final block of the feature extractor.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 kernels3,
                 exp_factors,
                 se_factors,
                 init_block_use_skip,
                 final_block_use_skip,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MnasNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration is stored only; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.kernels3 = kernels3
        self.exp_factors = exp_factors
        self.se_factors = se_factors
        self.init_block_use_skip = init_block_use_skip
        self.final_block_use_skip = final_block_use_skip
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = mnas_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels[1],
            mid_channels=self.init_block_channels[0],
            use_skip=self.init_block_use_skip,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels[1]
        # Thread the running channel count through all stages; the layer-name strings
        # ("features/stage{}/unit{}") define the checkpoint variable namespace.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Every stage downsamples at its first unit.
                strides = 2 if (j == 0) else 1
                # kernels3 stores 1/0 flags: 1 -> 3x3 depthwise kernel, 0 -> 5x5.
                use_kernel3 = self.kernels3[i][j] == 1
                exp_factor = self.exp_factors[i][j]
                se_factor = self.se_factors[i][j]
                x = dws_exp_se_res_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    use_kernel3=use_kernel3,
                    exp_factor=exp_factor,
                    se_factor=se_factor,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = mnas_final_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels[1],
            mid_channels=self.final_block_channels[0],
            use_skip=self.final_block_use_skip,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        # in_channels = self.final_block_channels[1]
        # pool_size=7 assumes a 7x7 final feature map, i.e. a 224x224 input -- TODO confirm
        # for other in_size values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_mnasnet(version,
                width_scale,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".keras", "models"),
                **kwargs):
    """
    Create MnasNet model with specific parameters.
    Parameters:
    ----------
    version : str
        Version of MnasNet ('b1', 'a1' or 'small').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.keras/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields (`state_dict`, `file_path`).
    """
    if version == "b1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24, 24], [40, 40, 40], [80, 80, 80, 96, 96], [192, 192, 192, 192]]
        kernels3 = [[1, 1, 1], [0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 0]]
        exp_factors = [[3, 3, 3], [3, 3, 3], [6, 6, 6, 6, 6], [6, 6, 6, 6]]
        se_factors = [[0, 0, 0], [0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0]]
        init_block_use_skip = False
        final_block_use_skip = False
    elif version == "a1":
        init_block_channels = [32, 16]
        final_block_channels = [320, 1280]
        channels = [[24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        kernels3 = [[1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[6, 6], [3, 3, 3], [6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0, 0], [4, 4, 4], [0, 0, 0, 0, 4, 4], [4, 4, 4]]
        init_block_use_skip = False
        final_block_use_skip = True
    elif version == "small":
        init_block_channels = [8, 8]
        final_block_channels = [144, 1280]
        channels = [[16], [16, 16], [32, 32, 32, 32, 32, 32, 32], [88, 88, 88]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 1, 1, 1], [0, 0, 0]]
        exp_factors = [[3], [6, 6], [6, 6, 6, 6, 6, 6, 6], [6, 6, 6]]
        se_factors = [[0], [0, 0], [4, 4, 4, 4, 4, 4, 4], [4, 4, 4]]
        init_block_use_skip = True
        final_block_use_skip = True
    else:
        raise ValueError("Unsupported MnasNet version {}".format(version))
    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        # Bug fix: `init_block_channels` is a list of two channel counts, so it must be
        # scaled element-wise; the previous `round_channels(init_block_channels * width_scale)`
        # raised a TypeError (list * float) for any width_scale != 1.0.
        init_block_channels = [round_channels(ci * width_scale) for ci in init_block_channels]
        # NOTE(review): `final_block_channels` is left unscaled, matching the original
        # behavior; confirm against the reference implementation whether it should scale
        # for width_scale > 1.
    net = MnasNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        kernels3=kernels3,
        exp_factors=exp_factors,
        se_factors=se_factors,
        init_block_use_skip=init_block_use_skip,
        final_block_use_skip=final_block_use_skip,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def mnasnet_b1(**kwargs):
    """
    MnasNet-B1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mnasnet(version="b1", width_scale=1.0, model_name="mnasnet_b1", **kwargs)
    return net
def mnasnet_a1(**kwargs):
    """
    MnasNet-A1 model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mnasnet(version="a1", width_scale=1.0, model_name="mnasnet_a1", **kwargs)
    return net
def mnasnet_small(**kwargs):
    """
    MnasNet-Small model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'
    https://arxiv.org/abs/1807.11626.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mnasnet(version="small", width_scale=1.0, model_name="mnasnet_small", **kwargs)
    return net
def _test():
    """
    Self-test: build every MnasNet variant, verify its trainable parameter count, and run
    a dummy forward pass through a TF1-style session.
    """
    import numpy as np
    # import logging
    # logging.getLogger("tensorflow").disabled = True
    data_format = "channels_last"
    pretrained = False
    models = [
        mnasnet_b1,
        mnasnet_a1,
        mnasnet_small,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.compat.v1.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # The count is taken from all trainable variables in the default graph, which is
        # why the graph is reset at the end of each iteration.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != mnasnet_b1 or weight_count == 4383312)
        assert (model != mnasnet_a1 or weight_count == 3887038)
        assert (model != mnasnet_small or weight_count == 2030264)
        with tf.compat.v1.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.compat.v1.global_variables_initializer())
            # Dummy batch of one zero image to smoke-test the forward pass.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.compat.v1.reset_default_graph()
if __name__ == "__main__":
    _test()
| 17,642 | 32.478178 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/seresnet.py | """
SE-ResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNet', 'seresnet10', 'seresnet12', 'seresnet14', 'seresnet16', 'seresnet18', 'seresnet26',
'seresnetbc26b', 'seresnet34', 'seresnetbc38b', 'seresnet50', 'seresnet50b', 'seresnet101', 'seresnet101b',
'seresnet152', 'seresnet152b', 'seresnet200', 'seresnet200b']
import os
import tensorflow as tf
from .common import conv1x1_block, se_block, is_channels_first, flatten
from .resnet import res_block, res_bottleneck_block, res_init_block
def seres_unit(x,
               in_channels,
               out_channels,
               strides,
               bottleneck,
               conv1_stride,
               training,
               data_format,
               name="seres_unit"):
    """
    SE-ResNet unit: a ResNet body followed by a squeeze-and-excitation block, joined to the
    input through a residual connection.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'seres_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # A plain identity shortcut is only valid when shape and channel count are preserved;
    # otherwise the input is projected with a strided 1x1 convolution.
    if (in_channels == out_channels) and (strides == 1):
        identity = x
    else:
        identity = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    if not bottleneck:
        x = res_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            training=training,
            data_format=data_format,
            name=name + "/body")
    else:
        x = res_bottleneck_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            conv1_stride=conv1_stride,
            training=training,
            data_format=data_format,
            name=name + "/body")
    # Channel-wise recalibration before the residual sum.
    x = se_block(
        x=x,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")
    return tf.nn.relu(x + identity, name=name + "/activ")
class SEResNet(object):
    """
    SE-ResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration is stored only; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = res_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        # The layer-name strings ("features/stage{}/unit{}") define the checkpoint
        # variable namespace.
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first one
                # (the init block has already reduced the resolution).
                strides = 2 if (j == 0) and (i != 0) else 1
                x = seres_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # pool_size=7 assumes a 7x7 final feature map, i.e. a 224x224 input -- TODO confirm
        # for other in_size values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_seresnet(blocks,
                 bottleneck=None,
                 conv1_stride=True,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create SE-ResNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units. By default it is deduced
        from the depth (bottleneck for 50 blocks and deeper).
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        bottleneck = (blocks >= 50)
    # Depth -> per-stage unit counts for the depths that do not depend on the block type.
    depth_to_layers = {
        10: [1, 1, 1, 1],
        12: [2, 1, 1, 1],
        16: [2, 2, 2, 1],
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    # Depths 14, 26 and 38 additionally depend on whether bottleneck blocks are used.
    if blocks == 14:
        layers = [1, 1, 1, 1] if bottleneck else [2, 2, 1, 1]
    elif blocks == 26:
        layers = [2, 2, 2, 2] if bottleneck else [3, 3, 3, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks in depth_to_layers:
        layers = depth_to_layers[blocks]
    else:
        raise ValueError("Unsupported SE-ResNet with number of blocks: {}".format(blocks))
    # Sanity check: the per-stage unit counts must reproduce the requested depth
    # (3 convolutions per bottleneck unit, 2 per simple unit, plus stem and classifier).
    assert (sum(layers) * (3 if bottleneck else 2) + 2 == blocks)
    init_block_channels = 64
    channels_per_layers = [64, 128, 256, 512]
    if bottleneck:
        bottleneck_factor = 4
        channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
    channels = [li * [ci] for (ci, li) in zip(channels_per_layers, layers)]
    net = SEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)
    if not pretrained:
        net.state_dict = None
        net.file_path = None
        return net
    if not model_name:
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import download_state_dict
    net.state_dict, net.file_path = download_state_dict(
        model_name=model_name,
        local_model_store_dir_path=root)
    return net
def seresnet10(**kwargs):
    """
    SE-ResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This depth is an experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=10, model_name="seresnet10", **kwargs)
    return net
def seresnet12(**kwargs):
    """
    SE-ResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This depth is an experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=12, model_name="seresnet12", **kwargs)
    return net
def seresnet14(**kwargs):
    """
    SE-ResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This depth is an experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=14, model_name="seresnet14", **kwargs)
    return net
def seresnet16(**kwargs):
    """
    SE-ResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This depth is an experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=16, model_name="seresnet16", **kwargs)
    return net
def seresnet18(**kwargs):
    """
    SE-ResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=18, model_name="seresnet18", **kwargs)
    return net
def seresnet26(**kwargs):
    """
    SE-ResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This depth is an experimental variant with simple (non-bottleneck) blocks.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=26, bottleneck=False, model_name="seresnet26", **kwargs)
    return net
def seresnetbc26b(**kwargs):
    """
    SE-ResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    An experimental bottleneck-compressed variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="seresnetbc26b", **kwargs)
    return net
def seresnet34(**kwargs):
    """
    SE-ResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=34, model_name="seresnet34", **kwargs)
    return net
def seresnetbc38b(**kwargs):
    """
    SE-ResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    An experimental bottleneck-compressed variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="seresnetbc38b", **kwargs)
    return net
def seresnet50(**kwargs):
    """
    SE-ResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=50, model_name="seresnet50", **kwargs)
    return net
def seresnet50b(**kwargs):
    """
    SE-ResNet-50 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=50, conv1_stride=False, model_name="seresnet50b", **kwargs)
    return net
def seresnet101(**kwargs):
    """
    SE-ResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=101, model_name="seresnet101", **kwargs)
    return net
def seresnet101b(**kwargs):
    """
    SE-ResNet-101 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=101, conv1_stride=False, model_name="seresnet101b", **kwargs)
    return net
def seresnet152(**kwargs):
    """
    SE-ResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=152, model_name="seresnet152", **kwargs)
    return net
def seresnet152b(**kwargs):
    """
    SE-ResNet-152 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=152, conv1_stride=False, model_name="seresnet152b", **kwargs)
    return net
def seresnet200(**kwargs):
    """
    SE-ResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    This depth is an experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=200, model_name="seresnet200", **kwargs)
    return net
def seresnet200b(**kwargs):
    """
    SE-ResNet-200 model with stride at the second convolution in bottleneck block from
    'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. This depth is an
    experimental variant.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnet(blocks=200, conv1_stride=False, model_name="seresnet200b", **kwargs)
    return net
def _test():
    """
    Self-test: build every SE-ResNet variant, verify its trainable parameter count, and run
    a dummy forward pass through a TF1-style session.
    Uses the `tf.compat.v1` graph-mode API for consistency with the other model modules in
    this package (e.g. mnasnet) and for compatibility with TensorFlow 2.x installations,
    where the bare `tf.placeholder`/`tf.Session` symbols no longer exist.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    models = [
        seresnet10,
        seresnet12,
        seresnet14,
        seresnet16,
        seresnet18,
        seresnet26,
        seresnetbc26b,
        seresnet34,
        seresnetbc38b,
        seresnet50,
        seresnet50b,
        seresnet101,
        seresnet101b,
        seresnet152,
        seresnet152b,
        seresnet200,
        seresnet200b,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.compat.v1.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # The count is taken from all trainable variables in the default graph, which is
        # why the graph is reset at the end of each iteration.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != seresnet10 or weight_count == 5463332)
        assert (model != seresnet12 or weight_count == 5537896)
        assert (model != seresnet14 or weight_count == 5835504)
        assert (model != seresnet16 or weight_count == 7024640)
        assert (model != seresnet18 or weight_count == 11778592)
        assert (model != seresnet26 or weight_count == 18093852)
        assert (model != seresnetbc26b or weight_count == 17395976)
        assert (model != seresnet34 or weight_count == 21958868)
        assert (model != seresnetbc38b or weight_count == 24026616)
        assert (model != seresnet50 or weight_count == 28088024)
        assert (model != seresnet50b or weight_count == 28088024)
        assert (model != seresnet101 or weight_count == 49326872)
        assert (model != seresnet101b or weight_count == 49326872)
        assert (model != seresnet152 or weight_count == 66821848)
        assert (model != seresnet152b or weight_count == 66821848)
        assert (model != seresnet200 or weight_count == 71835864)
        assert (model != seresnet200b or weight_count == 71835864)
        with tf.compat.v1.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.compat.v1.global_variables_initializer())
            # Dummy batch of one zero image to smoke-test the forward pass.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        tf.compat.v1.reset_default_graph()


if __name__ == "__main__":
    _test()
| 21,991 | 30.194326 | 118 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/densenet.py | """
DenseNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.
"""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
import os
import tensorflow as tf
from .common import pre_conv1x1_block, pre_conv3x3_block, is_channels_first, get_channel_axis, flatten
from .preresnet import preres_init_block, preres_activation
def dense_unit(x,
               in_channels,
               out_channels,
               dropout_rate,
               training,
               data_format,
               name="dense_unit"):
    """
    DenseNet unit: a pre-activated 1x1 bottleneck conv followed by a 3x3 conv,
    whose output is concatenated with the unit input along the channel axis.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dense_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Each unit adds `growth_rate` channels; the 1x1 bottleneck expands to
    # 4x the increment before the 3x3 conv (standard DenseNet-BC layout).
    bn_size = 4
    inc_channels = out_channels - in_channels
    mid_channels = inc_channels * bn_size

    identity = x
    x = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    x = pre_conv3x3_block(
        x=x,
        in_channels=mid_channels,
        out_channels=inc_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    use_dropout = (dropout_rate != 0.0)
    if use_dropout:
        # Fixed scope name: was `name + "dropout"` (missing "/"), inconsistent
        # with every other sub-layer name in this file. Dropout has no
        # variables, so this cannot break pretrained-weight loading.
        x = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")(
            inputs=x,
            training=training)
    # Dense connectivity: concatenate the input with the newly computed
    # feature maps along the channel axis.
    x = tf.concat([identity, x], axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def transition_block(x,
                     in_channels,
                     out_channels,
                     training,
                     data_format,
                     name="transition_block"):
    """
    DenseNet transition block: a pre-activated 1x1 convolution (typically
    halving the channel count) followed by 2x2 average pooling. Applied at
    the start of every stage except the first.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'transition_block'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    reduced = pre_conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    pool = tf.keras.layers.AveragePooling2D(
        pool_size=2,
        strides=2,
        data_format=data_format,
        name=name + "/pool")
    return pool(reduced)
class DenseNet(object):
    """
    DenseNet model from 'Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    dropout_rate : float, default 0.0
        Parameter of Dropout layer. Fraction of the input units to drop.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 dropout_rate=0.0,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DenseNet, self).__init__(**kwargs)
        assert data_format in ("channels_last", "channels_first")
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.dropout_rate = dropout_rate
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        data_format = self.data_format
        # Stem: shared pre-activated ResNet-style initial block.
        x = preres_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=data_format,
            name="features/init_block")
        channel_count = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels, start=1):
            # Every stage except the first starts with a channel-halving
            # transition block.
            if stage_idx > 1:
                x = transition_block(
                    x=x,
                    in_channels=channel_count,
                    out_channels=channel_count // 2,
                    training=training,
                    data_format=data_format,
                    name="features/stage{}/trans{}".format(stage_idx, stage_idx))
                channel_count //= 2
            for unit_idx, unit_channels in enumerate(stage_channels, start=1):
                x = dense_unit(
                    x=x,
                    in_channels=channel_count,
                    out_channels=unit_channels,
                    dropout_rate=self.dropout_rate,
                    training=training,
                    data_format=data_format,
                    name="features/stage{}/unit{}".format(stage_idx, unit_idx))
                channel_count = unit_channels
        # Final BN+ReLU (pre-activation networks need this after the last unit).
        x = preres_activation(
            x=x,
            training=training,
            data_format=data_format,
            name="features/post_activ")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=data_format)
        return tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
def get_densenet(blocks,
                 model_name=None,
                 pretrained=False,
                 root=os.path.join("~", ".tensorflow", "models"),
                 **kwargs):
    """
    Create DenseNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (one of 121, 161, 169, 201).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.

    Raises:
    ------
    ValueError
        If `blocks` is unsupported, or `pretrained` is set without `model_name`.
    """
    if blocks == 121:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 24, 16]
    elif blocks == 161:
        init_block_channels = 96
        growth_rate = 48
        layers = [6, 12, 36, 24]
    elif blocks == 169:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 32, 32]
    elif blocks == 201:
        init_block_channels = 64
        growth_rate = 32
        layers = [6, 12, 48, 32]
    else:
        raise ValueError("Unsupported DenseNet version with number of layers {}".format(blocks))

    # Compute the per-unit output channels explicitly (replaces an opaque
    # nested `functools.reduce`, same result): each stage starts from half the
    # previous stage's final width (the transition block halves channels), and
    # every dense unit adds `growth_rate` channels.
    channels = []
    last_channels = init_block_channels * 2
    for layer_count in layers:
        last_channels //= 2
        stage_channels = []
        for _ in range(layer_count):
            last_channels += growth_rate
            stage_channels.append(last_channels)
        channels.append(stage_channels)

    net = DenseNet(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def densenet121(**kwargs):
    """
    DenseNet-121 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_densenet(blocks=121, model_name="densenet121", **kwargs)
    return net
def densenet161(**kwargs):
    """
    DenseNet-161 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_densenet(blocks=161, model_name="densenet161", **kwargs)
    return net
def densenet169(**kwargs):
    """
    DenseNet-169 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_densenet(blocks=169, model_name="densenet169", **kwargs)
    return net
def densenet201(**kwargs):
    """
    DenseNet-201 model ('Densely Connected Convolutional Networks,'
    https://arxiv.org/abs/1608.06993).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_densenet(blocks=201, model_name="densenet201", **kwargs)
    return net
def _test():
    """Smoke test: build each DenseNet variant, check its trainable-parameter
    count, and run one dummy forward pass (TF1 graph mode)."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    expected_weight_counts = {
        densenet121: 7978856,
        densenet161: 28681000,
        densenet169: 14149480,
        densenet201: 20013928,
    }

    for model in (densenet121, densenet161, densenet169, densenet201):
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model])

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Each model gets a fresh graph so variable counts don't accumulate.
        tf.reset_default_graph()
# Run the module's smoke test when executed directly as a script.
if __name__ == "__main__":
    _test()
| 13,065 | 29.816038 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/seresnext.py | """
SE-ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEResNeXt', 'seresnext50_32x4d', 'seresnext101_32x4d', 'seresnext101_64x4d']
import os
import tensorflow as tf
from .common import conv1x1_block, se_block, is_channels_first, flatten
from .resnet import res_init_block
from .resnext import resnext_bottleneck
def seresnext_unit(x,
                   in_channels,
                   out_channels,
                   strides,
                   cardinality,
                   bottleneck_width,
                   training,
                   data_format,
                   name="seresnext_unit"):
    """
    SE-ResNeXt unit: a grouped ResNeXt bottleneck followed by channel-wise SE
    recalibration, wrapped in a residual connection with ReLU on the sum.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'seresnext_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # A projection shortcut is required whenever the shape changes.
    if (in_channels == out_channels) and (strides == 1):
        shortcut = x
    else:
        shortcut = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    body = resnext_bottleneck(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        training=training,
        data_format=data_format,
        name=name + "/body")
    body = se_block(
        x=body,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")
    return tf.nn.relu(body + shortcut, name=name + "/activ")
class SEResNeXt(object):
    """
    SE-ResNeXt model from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SEResNeXt, self).__init__(**kwargs)
        assert data_format in ("channels_last", "channels_first")
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        data_format = self.data_format
        # Stem: standard ResNet initial block (7x7 conv + max pooling).
        x = res_init_block(
            x=x,
            in_channels=self.in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=data_format,
            name="features/init_block")
        current_channels = self.init_block_channels
        for stage_idx, stage_channels in enumerate(self.channels, start=1):
            for unit_idx, unit_channels in enumerate(stage_channels, start=1):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_idx == 1 and stage_idx > 1) else 1
                x = seresnext_unit(
                    x=x,
                    in_channels=current_channels,
                    out_channels=unit_channels,
                    strides=unit_strides,
                    cardinality=self.cardinality,
                    bottleneck_width=self.bottleneck_width,
                    training=training,
                    data_format=data_format,
                    name="features/stage{}/unit{}".format(stage_idx, unit_idx))
                current_channels = unit_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=data_format)
        return tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
def get_seresnext(blocks,
                  cardinality,
                  bottleneck_width,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create SE-ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (50 or 101).
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported SE-ResNeXt with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]

    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]

    net = SEResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def seresnext50_32x4d(**kwargs):
    """
    SE-ResNeXt-50 (32x4d) model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnext(blocks=50, cardinality=32, bottleneck_width=4, model_name="seresnext50_32x4d", **kwargs)
    return net
def seresnext101_32x4d(**kwargs):
    """
    SE-ResNeXt-101 (32x4d) model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnext(blocks=101, cardinality=32, bottleneck_width=4, model_name="seresnext101_32x4d", **kwargs)
    return net
def seresnext101_64x4d(**kwargs):
    """
    SE-ResNeXt-101 (64x4d) model ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_seresnext(blocks=101, cardinality=64, bottleneck_width=4, model_name="seresnext101_64x4d", **kwargs)
    return net
def _test():
    """Smoke test: build each SE-ResNeXt variant, check its trainable-parameter
    count, and run one dummy forward pass (TF1 graph mode)."""
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    expected_weight_counts = {
        seresnext50_32x4d: 27559896,
        seresnext101_32x4d: 48955416,
        seresnext101_64x4d: 88232984,
    }

    for model in (seresnext50_32x4d, seresnext101_32x4d, seresnext101_64x4d):
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model])

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1,) + input_shape[1:], np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Each model gets a fresh graph so variable counts don't accumulate.
        tf.reset_default_graph()
# Run the module's smoke test when executed directly as a script.
if __name__ == "__main__":
    _test()
| 10,990 | 30.048023 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mobilenetv3.py | """
MobileNetV3 for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
"""
__all__ = ['MobileNetV3', 'mobilenetv3_small_w7d20', 'mobilenetv3_small_wd2', 'mobilenetv3_small_w3d4',
'mobilenetv3_small_w1', 'mobilenetv3_small_w5d4', 'mobilenetv3_large_w7d20', 'mobilenetv3_large_wd2',
'mobilenetv3_large_w3d4', 'mobilenetv3_large_w1', 'mobilenetv3_large_w5d4']
import os
import tensorflow as tf
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
se_block, hswish, is_channels_first, flatten
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
def mobilenetv3_unit(x,
                     in_channels,
                     out_channels,
                     exp_channels,
                     strides,
                     use_kernel3,
                     activation,
                     use_se,
                     training,
                     data_format,
                     name="mobilenetv3_unit"):
    """
    MobileNetV3 unit: inverted residual block with optional expansion conv,
    depthwise 3x3/5x5 conv, optional SE-block, and a linear projection.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    exp_channels : int
        Number of middle (expanded) channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    use_kernel3 : bool
        Whether to use 3x3 (instead of 5x5) kernel.
    activation : str
        Activation function or name of activation function.
    use_se : bool
        Whether to use SE-module.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mobilenetv3_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (exp_channels >= out_channels)
    # The residual shortcut is only valid when the shape is unchanged.
    residual = (in_channels == out_channels) and (strides == 1)
    use_exp_conv = exp_channels != out_channels
    mid_channels = exp_channels

    if residual:
        identity = x
    if use_exp_conv:
        x = conv1x1_block(
            x=x,
            in_channels=in_channels,
            out_channels=mid_channels,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/exp_conv")
    if use_kernel3:
        x = dwconv3x3_block(
            x=x,
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/conv1")
    else:
        # Fixed: this branch previously omitted `training` and `data_format`,
        # so batch norm never switched to training mode and a non-default
        # data format was ignored for all 5x5 units (cf. the 3x3 branch).
        x = dwconv5x5_block(
            x=x,
            in_channels=mid_channels,
            out_channels=mid_channels,
            strides=strides,
            activation=activation,
            training=training,
            data_format=data_format,
            name=name + "/conv1")
    if use_se:
        x = se_block(
            x=x,
            channels=mid_channels,
            reduction=4,
            approx_sigmoid=True,
            round_mid=True,
            data_format=data_format,
            name=name + "/se")
    # Linear (non-activated) projection back to `out_channels`.
    x = conv1x1_block(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    if residual:
        x = x + identity
    return x
def mobilenetv3_final_block(x,
                            in_channels,
                            out_channels,
                            use_se,
                            training,
                            data_format,
                            name="mobilenetv3_final_block"):
    """
    MobileNetV3 final block of the feature extractor: a 1x1 convolution with
    h-swish activation, optionally followed by an SE recalibration block.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    use_se : bool
        Whether to use SE-module.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mobilenetv3_final_block'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        activation="hswish",
        training=training,
        data_format=data_format,
        name=name + "/conv")
    if not use_se:
        return out
    return se_block(
        x=out,
        channels=out_channels,
        reduction=4,
        approx_sigmoid=True,
        round_mid=True,
        data_format=data_format,
        name=name + "/se")
def mobilenetv3_classifier(x,
                           in_channels,
                           out_channels,
                           mid_channels,
                           dropout_rate,
                           training,
                           data_format,
                           name="mobilenetv3_classifier"):
    """
    MobileNetV3 classifier: two 1x1 convolutions with h-swish and optional
    dropout in between (applied after global pooling).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    dropout_rate : float
        Parameter of Dropout layer. Fraction of the input units to drop.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'mobilenetv3_classifier'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # NOTE: default `name` fixed from "mobilenetv3_final_block" (copy-paste
    # from the block above) to "mobilenetv3_classifier", matching the
    # docstring. The only in-file caller passes name="output" explicitly.
    x = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        data_format=data_format,
        name=name + "/conv1")
    x = hswish(x, name=name + "/hswish")
    use_dropout = (dropout_rate != 0.0)
    if use_dropout:
        # Fixed scope name: was `name + "dropout"` (missing "/"), inconsistent
        # with the "/"-separated naming of every other sub-layer. Dropout has
        # no variables, so pretrained-weight loading is unaffected.
        x = tf.keras.layers.Dropout(
            rate=dropout_rate,
            name=name + "/dropout")(
            inputs=x,
            training=training)
    x = conv1x1(
        x=x,
        in_channels=mid_channels,
        out_channels=out_channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv2")
    return x
class MobileNetV3(object):
    """
    MobileNetV3 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    exp_channels : list of list of int
        Number of middle (expanded) channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    classifier_mid_channels : int
        Number of middle channels for classifier.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 5x5) kernel for each unit.
    use_relu : list of list of int/bool
        Using ReLU activation flag for each unit.
    use_se : list of list of int/bool
        Using SE-block flag for each unit.
    first_stride : bool
        Whether to use stride for the first stage.
    final_use_se : bool
        Whether to use SE-module in the final block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 exp_channels,
                 init_block_channels,
                 final_block_channels,
                 classifier_mid_channels,
                 kernels3,
                 use_relu,
                 use_se,
                 first_stride,
                 final_use_se,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNetV3, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # `channels`, `exp_channels`, `kernels3`, `use_relu` and `use_se` are
        # parallel nested lists: index [i][j] configures unit j of stage i.
        self.channels = channels
        self.exp_channels = exp_channels
        self.init_block_channels = init_block_channels
        self.final_block_channels = final_block_channels
        self.classifier_mid_channels = classifier_mid_channels
        self.kernels3 = kernels3
        self.use_relu = use_relu
        self.use_se = use_se
        self.first_stride = first_stride
        self.final_use_se = final_use_se
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # Stem: strided 3x3 convolution with h-swish activation.
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            strides=2,
            activation="hswish",
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                exp_channels_ij = self.exp_channels[i][j]
                # Downsample at the first unit of each stage; the very first
                # stage downsamples only when `first_stride` is set.
                strides = 2 if (j == 0) and ((i != 0) or self.first_stride) else 1
                use_kernel3 = self.kernels3[i][j] == 1
                activation = "relu" if self.use_relu[i][j] == 1 else "hswish"
                use_se_flag = self.use_se[i][j] == 1
                x = mobilenetv3_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    exp_channels=exp_channels_ij,
                    use_kernel3=use_kernel3,
                    strides=strides,
                    activation=activation,
                    use_se=use_se_flag,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        x = mobilenetv3_final_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.final_block_channels,
            use_se=self.final_use_se,
            training=training,
            data_format=self.data_format,
            name="features/final_block")
        in_channels = self.final_block_channels
        # Fixed 7x7 average pooling acts as global pooling for a 224x224
        # input — presumably other input sizes need adjustment; TODO confirm.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # The classifier is built from 1x1 convs, so flattening happens last.
        x = mobilenetv3_classifier(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            mid_channels=self.classifier_mid_channels,
            dropout_rate=0.2,
            training=training,
            data_format=self.data_format,
            name="output")
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_mobilenetv3(version,
                    width_scale,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create MobileNetV3 model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MobileNetV3 ('small' or 'large').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if version == "small":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40, 48, 48], [96, 96, 96]]
        exp_channels = [[16], [72, 88], [96, 240, 240, 120, 144], [288, 576, 576]]
        kernels3 = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_relu = [[1], [1, 1], [0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[1], [0, 0], [1, 1, 1, 1, 1], [1, 1, 1]]
        first_stride = True
        final_block_channels = 576
    elif version == "large":
        init_block_channels = 16
        channels = [[16], [24, 24], [40, 40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160]]
        exp_channels = [[16], [64, 72], [72, 120, 120], [240, 200, 184, 184, 480, 672], [672, 960, 960]]
        kernels3 = [[1], [1, 1], [0, 0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0]]
        use_relu = [[1], [1, 1], [1, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 0]]
        use_se = [[0], [0, 0], [1, 1, 1], [0, 0, 0, 0, 1, 1], [1, 1, 1]]
        first_stride = False
        final_block_channels = 960
    else:
        raise ValueError("Unsupported MobileNetV3 version {}".format(version))

    final_use_se = False
    classifier_mid_channels = 1280

    if width_scale != 1.0:
        channels = [[round_channels(cij * width_scale) for cij in ci] for ci in channels]
        exp_channels = [[round_channels(cij * width_scale) for cij in ci] for ci in exp_channels]
        init_block_channels = round_channels(init_block_channels * width_scale)
        # Per the paper, the last feature block is widened only when scaling up.
        if width_scale > 1.0:
            final_block_channels = round_channels(final_block_channels * width_scale)

    net = MobileNetV3(
        channels=channels,
        exp_channels=exp_channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        classifier_mid_channels=classifier_mid_channels,
        kernels3=kernels3,
        use_relu=use_relu,
        use_se=use_se,
        first_stride=first_stride,
        final_use_se=final_use_se,
        **kwargs)

    # Fixed: previously this called `download_model(net=...)` (Gluon/Keras-style
    # eager loading) and never set `net.state_dict`/`net.file_path`, which the
    # sibling TF1 models (see get_densenet/get_seresnext) and their `_test`
    # helpers rely on. Align with the package's graph-mode loading convention.
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def mobilenetv3_small_w7d20(**kwargs):
    """
    MobileNetV3 Small 224/0.35 model ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mobilenetv3(version="small", width_scale=0.35, model_name="mobilenetv3_small_w7d20", **kwargs)
    return net
def mobilenetv3_small_wd2(**kwargs):
    """
    MobileNetV3 Small 224/0.5 model ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mobilenetv3(version="small", width_scale=0.5, model_name="mobilenetv3_small_wd2", **kwargs)
    return net
def mobilenetv3_small_w3d4(**kwargs):
    """
    MobileNetV3 Small 224/0.75 model ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mobilenetv3(version="small", width_scale=0.75, model_name="mobilenetv3_small_w3d4", **kwargs)
    return net
def mobilenetv3_small_w1(**kwargs):
    """
    MobileNetV3 Small 224/1.0 model ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mobilenetv3(version="small", width_scale=1.0, model_name="mobilenetv3_small_w1", **kwargs)
    return net
def mobilenetv3_small_w5d4(**kwargs):
    """
    MobileNetV3 Small 224/1.25 model ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    net = get_mobilenetv3(version="small", width_scale=1.25, model_name="mobilenetv3_small_w5d4", **kwargs)
    return net
def mobilenetv3_large_w7d20(**kwargs):
    """
    MobileNetV3 Large 224/0.35 model from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Fixed copy-paste bug: this wrapper previously passed the *small*
    # variant's model name ("mobilenetv3_small_w7d20"), which would fetch
    # the wrong pretrained weights; the docstring also said "Small".
    return get_mobilenetv3(version="large", width_scale=0.35, model_name="mobilenetv3_large_w7d20", **kwargs)
def mobilenetv3_large_wd2(**kwargs):
    """
    Build MobileNetV3 Large 224/0.5 ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.
    """
    return get_mobilenetv3(model_name="mobilenetv3_large_wd2", version="large", width_scale=0.5, **kwargs)


def mobilenetv3_large_w3d4(**kwargs):
    """
    Build MobileNetV3 Large 224/0.75 ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.
    """
    return get_mobilenetv3(model_name="mobilenetv3_large_w3d4", version="large", width_scale=0.75, **kwargs)


def mobilenetv3_large_w1(**kwargs):
    """
    Build MobileNetV3 Large 224/1.0 ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.
    """
    return get_mobilenetv3(model_name="mobilenetv3_large_w1", version="large", width_scale=1.0, **kwargs)


def mobilenetv3_large_w5d4(**kwargs):
    """
    Build MobileNetV3 Large 224/1.25 ('Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.
    """
    return get_mobilenetv3(model_name="mobilenetv3_large_w5d4", version="large", width_scale=1.25, **kwargs)
def _test():
    """
    Smoke-test every MobileNetV3 variant: build the graph, check the trainable
    parameter count, and run a single zero input through a session.
    """
    import numpy as np
    # import logging
    # logging.getLogger("tensorflow").disabled = True

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter count for each model builder.
    expected_counts = {
        mobilenetv3_small_w7d20: 2159600,
        mobilenetv3_small_wd2: 2288976,
        mobilenetv3_small_w3d4: 2581312,
        mobilenetv3_small_w1: 2945288,
        mobilenetv3_small_w5d4: 3643632,
        mobilenetv3_large_w7d20: 2943080,
        mobilenetv3_large_wd2: 3334896,
        mobilenetv3_large_w3d4: 4263496,
        mobilenetv3_large_w1: 5481752,
        mobilenetv3_large_w5d4: 7459144,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.compat.v1.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.compat.v1.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        with tf.compat.v1.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.compat.v1.global_variables_initializer())
            batch_shape = (1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3)
            y = sess.run(y_net, feed_dict={x: np.zeros(batch_shape, np.float32)})
            assert (y.shape == (1, 1000))
        tf.compat.v1.reset_default_graph()


if __name__ == "__main__":
    _test()
| 22,437 | 33.048558 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/sepreresnet.py | """
SE-PreResNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b']
import os
import tensorflow as tf
from .common import conv1x1, se_block, is_channels_first, flatten
from .preresnet import preres_block, preres_bottleneck_block, preres_init_block, preres_activation
def sepreres_unit(x,
                  in_channels,
                  out_channels,
                  strides,
                  bottleneck,
                  conv1_stride,
                  training,
                  data_format,
                  name="sepreres_unit"):
    """
    SE-PreResNet unit: a pre-activated residual body, SE channel recalibration,
    and a shortcut that is projected (from the pre-activation tensor) whenever
    the spatial size or channel count changes.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer of the block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'sepreres_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    shortcut = x
    if bottleneck:
        x, x_pre_activ = preres_bottleneck_block(
            x=x, in_channels=in_channels, out_channels=out_channels,
            strides=strides, conv1_stride=conv1_stride,
            training=training, data_format=data_format, name=name + "/body")
    else:
        x, x_pre_activ = preres_block(
            x=x, in_channels=in_channels, out_channels=out_channels,
            strides=strides, training=training, data_format=data_format,
            name=name + "/body")

    # Recalibrate channels of the residual branch before the addition.
    x = se_block(x=x, channels=out_channels, data_format=data_format, name=name + "/se")

    if (in_channels != out_channels) or (strides != 1):
        # Pre-ResNet style: the projection reads the pre-activation tensor.
        shortcut = conv1x1(
            x=x_pre_activ, in_channels=in_channels, out_channels=out_channels,
            strides=strides, data_format=data_format,
            name=name + "/identity_conv/conv")
    return x + shortcut
class SEPreResNet(object):
    """
    SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 conv1_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        # Only hyper-parameters are stored here; the actual TF graph is built
        # lazily when the instance is called (see __call__).
        super(SEPreResNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.bottleneck = bottleneck
        self.conv1_stride = conv1_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        NOTE(review): the explicit scope names ("features/...", "output") appear
        to be the keys used when loading pretrained state dicts — keep them
        stable.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = preres_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = sepreres_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bottleneck=self.bottleneck,
                    conv1_stride=self.conv1_stride,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Pre-ResNet applies BN+ReLU once after the last unit.
        x = preres_activation(
            x=x,
            training=training,
            data_format=self.data_format,
            name="features/post_activ")
        # 7x7 pooling acts as global average pooling, assuming a 224x224 input
        # (224 / 32 = 7) — TODO confirm for other in_size values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_sepreresnet(blocks,
                    bottleneck=None,
                    conv1_stride=True,
                    model_name=None,
                    pretrained=False,
                    root=os.path.join("~", ".tensorflow", "models"),
                    **kwargs):
    """
    Create SE-PreResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    bottleneck : bool, default None
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    if bottleneck is None:
        # Deep variants default to bottleneck units.
        bottleneck = (blocks >= 50)

    # Per-stage unit counts. The branch order matters: some depths (14, 26)
    # map to different layouts depending on the bottleneck flag.
    if blocks == 10:
        layers = [1, 1, 1, 1]
    elif blocks == 12:
        layers = [2, 1, 1, 1]
    elif blocks == 14 and not bottleneck:
        layers = [2, 2, 1, 1]
    elif (blocks == 14) and bottleneck:
        layers = [1, 1, 1, 1]
    elif blocks == 16:
        layers = [2, 2, 2, 1]
    elif blocks == 18:
        layers = [2, 2, 2, 2]
    elif (blocks == 26) and not bottleneck:
        layers = [3, 3, 3, 3]
    elif (blocks == 26) and bottleneck:
        layers = [2, 2, 2, 2]
    elif blocks == 34:
        layers = [3, 4, 6, 3]
    elif (blocks == 38) and bottleneck:
        layers = [3, 3, 3, 3]
    elif blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    elif blocks == 152:
        layers = [3, 8, 36, 3]
    elif blocks == 200:
        layers = [3, 24, 36, 3]
    elif blocks == 269:
        layers = [3, 30, 48, 8]
    else:
        raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))

    # Depth formula: each unit has 3 convs (bottleneck) or 2 (simple), plus the
    # stem conv and the classifier layer.
    convs_per_unit = 3 if bottleneck else 2
    assert (sum(layers) * convs_per_unit + 2 == blocks)

    init_block_channels = 64
    stage_channels = [64, 128, 256, 512]
    if bottleneck:
        stage_channels = [c * 4 for c in stage_channels]
    channels = [[c] * depth for (c, depth) in zip(stage_channels, layers)]

    net = SEPreResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    net.state_dict = None
    net.file_path = None
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def sepreresnet10(**kwargs):
    """
    Build the 10-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet10", blocks=10, **kwargs)


def sepreresnet12(**kwargs):
    """
    Build the 12-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet12", blocks=12, **kwargs)


def sepreresnet14(**kwargs):
    """
    Build the 14-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet14", blocks=14, **kwargs)


def sepreresnet16(**kwargs):
    """
    Build the 16-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet16", blocks=16, **kwargs)


def sepreresnet18(**kwargs):
    """
    Build the 18-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet18", blocks=18, **kwargs)


def sepreresnet26(**kwargs):
    """
    Build the 26-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet26", blocks=26, **kwargs)
def sepreresnetbc26b(**kwargs):
    """
    Build SE-PreResNet-BC-26b: 26 layers, bottleneck units, stride in the
    second bottleneck conv ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnetbc26b", blocks=26, bottleneck=True, conv1_stride=False, **kwargs)


def sepreresnet34(**kwargs):
    """
    Build the 34-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet34", blocks=34, **kwargs)


def sepreresnetbc38b(**kwargs):
    """
    Build SE-PreResNet-BC-38b: 38 layers, bottleneck units, stride in the
    second bottleneck conv ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnetbc38b", blocks=38, bottleneck=True, conv1_stride=False, **kwargs)


def sepreresnet50(**kwargs):
    """
    Build the 50-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet50", blocks=50, **kwargs)
def sepreresnet50b(**kwargs):
    """
    Build SE-PreResNet-50b: 50 layers with the stride moved to the second
    bottleneck conv ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet50b", blocks=50, conv1_stride=False, **kwargs)


def sepreresnet101(**kwargs):
    """
    Build the 101-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet101", blocks=101, **kwargs)


def sepreresnet101b(**kwargs):
    """
    Build SE-PreResNet-101b: 101 layers with the stride moved to the second
    bottleneck conv ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet101b", blocks=101, conv1_stride=False, **kwargs)


def sepreresnet152(**kwargs):
    """
    Build the 152-layer SE-PreResNet ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet152", blocks=152, **kwargs)
def sepreresnet152b(**kwargs):
    """
    Build SE-PreResNet-152b: 152 layers with the stride moved to the second
    bottleneck conv ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet152b", blocks=152, conv1_stride=False, **kwargs)


def sepreresnet200(**kwargs):
    """
    Build the experimental 200-layer SE-PreResNet ('Squeeze-and-Excitation
    Networks,' https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet200", blocks=200, **kwargs)


def sepreresnet200b(**kwargs):
    """
    Build the experimental SE-PreResNet-200b: 200 layers with the stride moved
    to the second bottleneck conv ('Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_sepreresnet(model_name="sepreresnet200b", blocks=200, conv1_stride=False, **kwargs)
def _test():
    """
    Smoke-test every SE-PreResNet variant: build the graph, verify the
    trainable parameter count, and push one zero image through a session.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    # Expected trainable-parameter count for each model builder.
    expected_counts = {
        sepreresnet10: 5461668,
        sepreresnet12: 5536232,
        sepreresnet14: 5833840,
        sepreresnet16: 7022976,
        sepreresnet18: 11776928,
        sepreresnet26: 18092188,
        sepreresnetbc26b: 17388424,
        sepreresnet34: 21957204,
        sepreresnetbc38b: 24019064,
        sepreresnet50: 28080472,
        sepreresnet50b: 28080472,
        sepreresnet101: 49319320,
        sepreresnet101b: 49319320,
        sepreresnet152: 66814296,
        sepreresnet152b: 66814296,
        sepreresnet200: 71828312,
        sepreresnet200b: 71828312,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, data_format=data_format)
        input_shape = (None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=input_shape,
            name="xx")
        y_net = net(x)

        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            batch_shape = (1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3)
            y = sess.run(y_net, feed_dict={x: np.zeros(batch_shape, np.float32)})
            assert (y.shape == (1, 1000))
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 22,299 | 30.766382 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/resnext.py | """
ResNeXt for ImageNet-1K, implemented in TensorFlow.
Original papers: 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.
"""
__all__ = ['ResNeXt', 'resnext14_16x4d', 'resnext14_32x2d', 'resnext14_32x4d', 'resnext26_16x4d', 'resnext26_32x2d',
'resnext26_32x4d', 'resnext38_32x4d', 'resnext50_32x4d', 'resnext101_32x4d', 'resnext101_64x4d',
'resnext_bottleneck']
import os
import math
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten
from .resnet import res_init_block
def resnext_bottleneck(x,
                       in_channels,
                       out_channels,
                       strides,
                       cardinality,
                       bottleneck_width,
                       bottleneck_factor=4,
                       training=False,
                       data_format="channels_last",
                       name="resnext_bottleneck"):
    """
    Residual body of a ResNeXt unit: 1x1 reduce -> grouped 3x3 -> 1x1 expand
    (no activation after the last conv).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'resnext_bottleneck'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // bottleneck_factor
    # Per-group width scales with bottleneck_width relative to the base of 64.
    per_group = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    group_width = cardinality * per_group

    y = conv1x1_block(
        x=x, in_channels=in_channels, out_channels=group_width,
        training=training, data_format=data_format, name=name + "/conv1")
    y = conv3x3_block(
        x=y, in_channels=group_width, out_channels=group_width,
        strides=strides, groups=cardinality, training=training,
        data_format=data_format, name=name + "/conv2")
    y = conv1x1_block(
        x=y, in_channels=group_width, out_channels=out_channels,
        activation=None, training=training, data_format=data_format,
        name=name + "/conv3")
    return y
def resnext_unit(x,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 training,
                 data_format,
                 name="resnext_unit"):
    """
    ResNeXt unit: bottleneck residual body plus an identity (or 1x1-projected)
    shortcut, followed by ReLU.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'resnext_unit'
        Unit name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Project the shortcut whenever the spatial size or channel count changes.
    if (in_channels != out_channels) or (strides != 1):
        shortcut = conv1x1_block(
            x=x, in_channels=in_channels, out_channels=out_channels,
            strides=strides, activation=None, training=training,
            data_format=data_format, name=name + "/identity_conv")
    else:
        shortcut = x

    y = resnext_bottleneck(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=strides, cardinality=cardinality,
        bottleneck_width=bottleneck_width, training=training,
        data_format=data_format, name=name + "/body")
    return tf.nn.relu(y + shortcut, name=name + "/activ")
class ResNeXt(object):
    """
    ResNeXt model from 'Aggregated Residual Transformations for Deep Neural Networks,' http://arxiv.org/abs/1611.05431.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        # Only hyper-parameters are stored here; the actual TF graph is built
        # lazily when the instance is called (see __call__).
        super(ResNeXt, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        NOTE(review): the explicit scope names ("features/...", "output") appear
        to be the keys used when loading pretrained state dicts — keep them
        stable.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = res_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of every stage except the first.
                strides = 2 if (j == 0) and (i != 0) else 1
                x = resnext_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    cardinality=self.cardinality,
                    bottleneck_width=self.bottleneck_width,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # 7x7 pooling acts as global average pooling, assuming a 224x224 input
        # (224 / 32 = 7) — TODO confirm for other in_size values.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_resnext(blocks,
                cardinality,
                bottleneck_width,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create ResNeXt model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-stage unit counts for each supported depth.
    depth_table = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in depth_table:
        raise ValueError("Unsupported ResNeXt with number of blocks: {}".format(blocks))
    layers = depth_table[blocks]
    # Depth formula: 3 convs per bottleneck unit, plus stem and classifier.
    assert (sum(layers) * 3 + 2 == blocks)

    init_block_channels = 64
    stage_channels = [256, 512, 1024, 2048]
    channels = [[c] * depth for (c, depth) in zip(stage_channels, layers)]

    net = ResNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)

    net.state_dict = None
    net.file_path = None
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    return net
def resnext14_16x4d(**kwargs):
    """
    Build ResNeXt-14 (16x4d) ('Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_resnext(model_name="resnext14_16x4d", blocks=14, cardinality=16, bottleneck_width=4, **kwargs)


def resnext14_32x2d(**kwargs):
    """
    Build ResNeXt-14 (32x2d) ('Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_resnext(model_name="resnext14_32x2d", blocks=14, cardinality=32, bottleneck_width=2, **kwargs)


def resnext14_32x4d(**kwargs):
    """
    Build ResNeXt-14 (32x4d) ('Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_resnext(model_name="resnext14_32x4d", blocks=14, cardinality=32, bottleneck_width=4, **kwargs)


def resnext26_16x4d(**kwargs):
    """
    Build ResNeXt-26 (16x4d) ('Aggregated Residual Transformations for Deep
    Neural Networks,' http://arxiv.org/abs/1611.05431).

    Parameters:
    ----------
    pretrained : bool, default False
        Download and attach pretrained weights.
    root : str, default '~/.tensorflow/models'
        Directory where model parameters are cached.

    Returns:
    -------
    functor
        Model graph constructor with extra fields.
    """
    return get_resnext(model_name="resnext26_16x4d", blocks=26, cardinality=16, bottleneck_width=4, **kwargs)
def resnext26_32x2d(**kwargs):
    """
    ResNeXt-26 (32x2d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnext(
        blocks=26,
        cardinality=32,
        bottleneck_width=2,
        model_name="resnext26_32x2d",
        **kwargs)
def resnext26_32x4d(**kwargs):
    """
    ResNeXt-26 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnext(
        blocks=26,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext26_32x4d",
        **kwargs)
def resnext38_32x4d(**kwargs):
    """
    ResNeXt-38 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnext(
        blocks=38,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext38_32x4d",
        **kwargs)
def resnext50_32x4d(**kwargs):
    """
    ResNeXt-50 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnext(
        blocks=50,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext50_32x4d",
        **kwargs)
def resnext101_32x4d(**kwargs):
    """
    ResNeXt-101 (32x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnext(
        blocks=101,
        cardinality=32,
        bottleneck_width=4,
        model_name="resnext101_32x4d",
        **kwargs)
def resnext101_64x4d(**kwargs):
    """
    ResNeXt-101 (64x4d) model from 'Aggregated Residual Transformations for Deep Neural Networks,'
    http://arxiv.org/abs/1611.05431.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_resnext(
        blocks=101,
        cardinality=64,
        bottleneck_width=4,
        model_name="resnext101_64x4d",
        **kwargs)
def _test():
    """
    Smoke test: build every ResNeXt variant, check its trainable-parameter
    count, and push one zero-filled image through a TF1 session.
    """
    import numpy as np
    # Test configuration.
    data_format = "channels_last"
    pretrained = False
    models = [
        resnext14_16x4d,
        resnext14_32x2d,
        resnext14_32x4d,
        resnext26_16x4d,
        resnext26_32x2d,
        resnext26_32x4d,
        resnext38_32x4d,
        resnext50_32x4d,
        resnext101_32x4d,
        resnext101_64x4d,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Input placeholder; layout depends on the chosen data format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable weights in the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != resnext14_16x4d or weight_count == 7127336)
        assert (model != resnext14_32x2d or weight_count == 7029416)
        assert (model != resnext14_32x4d or weight_count == 9411880)
        assert (model != resnext26_16x4d or weight_count == 10119976)
        assert (model != resnext26_32x2d or weight_count == 9924136)
        assert (model != resnext26_32x4d or weight_count == 15389480)
        assert (model != resnext38_32x4d or weight_count == 21367080)
        assert (model != resnext50_32x4d or weight_count == 25028904)
        assert (model != resnext101_32x4d or weight_count == 44177704)
        assert (model != resnext101_64x4d or weight_count == 83455272)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # A single zero image is enough to verify the output shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so variables don't accumulate across models.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 18,384 | 30.320273 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/senet.py | """
SENet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SENet', 'senet16', 'senet28', 'senet40', 'senet52', 'senet103', 'senet154']
import os
import math
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, maxpool2d, se_block, is_channels_first, flatten
def senet_bottleneck(x,
                     in_channels,
                     out_channels,
                     strides,
                     cardinality,
                     bottleneck_width,
                     training,
                     data_format,
                     name="senet_bottleneck"):
    """
    SENet bottleneck block used on the residual branch of a SENet unit.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'senet_bottleneck'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Bottleneck width scales with the cardinality, ResNeXt-style.
    mid_channels = out_channels // 4
    channels_per_group = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
    wide_channels = cardinality * channels_per_group
    # SENet's variant halves the width of the first 1x1 convolution.
    narrow_channels = wide_channels // 2
    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=narrow_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=narrow_channels,
        out_channels=wide_channels,
        strides=strides,
        groups=cardinality,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # Final 1x1 projection is linear (no activation).
    y = conv1x1_block(
        x=y,
        in_channels=wide_channels,
        out_channels=out_channels,
        activation=None,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    return y
def senet_unit(x,
               in_channels,
               out_channels,
               strides,
               cardinality,
               bottleneck_width,
               identity_conv3x3,
               training,
               data_format,
               name="senet_unit"):
    """
    SENet unit: bottleneck residual branch with squeeze-and-excitation
    recalibration and a (possibly projected) identity shortcut.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    identity_conv3x3 : bool, default False
        Whether to use 3x3 convolution in the identity link.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'senet_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Project the shortcut whenever the residual branch changes shape.
    needs_projection = (in_channels != out_channels) or (strides != 1)
    if needs_projection:
        # Both projections are linear (activation=None); they differ only in
        # kernel size.
        projection = conv3x3_block if identity_conv3x3 else conv1x1_block
        identity = projection(
            x=x,
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            activation=None,
            training=training,
            data_format=data_format,
            name=name + "/identity_conv")
    else:
        identity = x
    x = senet_bottleneck(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=strides,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        training=training,
        data_format=data_format,
        name=name + "/body")
    # Channel-wise recalibration of the residual branch.
    x = se_block(
        x=x,
        channels=out_channels,
        data_format=data_format,
        name=name + "/se")
    x = x + identity
    x = tf.nn.relu(x, name=name + "/activ")
    return x
def senet_init_block(x,
                     in_channels,
                     out_channels,
                     training,
                     data_format,
                     name="senet_init_block"):
    """
    SENet specific initial block: three stacked 3x3 convolutions (the first
    one strided) followed by max-pooling.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'senet_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // 2
    # First conv halves the spatial resolution.
    y = conv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        strides=2,
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=mid_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    # Last conv widens to the full stem width.
    y = conv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/conv3")
    # 3x3/2 max-pooling finishes the stem.
    y = maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return y
class SENet(object):
    """
    SENet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(SENet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Only the hyper-parameters are stored here; the graph is built lazily
        # when the instance is called.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.cardinality = cardinality
        self.bottleneck_width = bottleneck_width
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = senet_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_num, stage_channels in enumerate(self.channels, start=1):
            # Only the first stage keeps 1x1 convolutions on the identity path.
            use_conv3x3_identity = (stage_num != 1)
            for unit_num, out_channels in enumerate(stage_channels, start=1):
                # Downsample at the first unit of every stage except the first.
                unit_strides = 2 if (unit_num == 1) and (stage_num != 1) else 1
                x = senet_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=unit_strides,
                    cardinality=self.cardinality,
                    bottleneck_width=self.bottleneck_width,
                    identity_conv3x3=use_conv3x3_identity,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_num, unit_num))
                in_channels = out_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        dropout_layer = tf.keras.layers.Dropout(
            rate=0.2,
            name="output/dropout")
        x = dropout_layer(
            inputs=x,
            training=training)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output/fc")(x)
        return x
def get_senet(blocks,
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create SENet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-depth configuration: (stage layout, cardinality).
    depth_config = {
        16: ([1, 1, 1, 1], 32),
        28: ([2, 2, 2, 2], 32),
        40: ([3, 3, 3, 3], 32),
        52: ([3, 4, 6, 3], 32),
        103: ([3, 4, 23, 3], 32),
        154: ([3, 8, 36, 3], 64),
    }
    if blocks not in depth_config:
        raise ValueError("Unsupported SENet with number of blocks: {}".format(blocks))
    layers, cardinality = depth_config[blocks]
    bottleneck_width = 4
    init_block_channels = 128
    channels_per_layers = [256, 512, 1024, 2048]
    # Expand each stage width into one entry per unit.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    net = SENet(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def senet16(**kwargs):
    """
    SENet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(
        blocks=16,
        model_name="senet16",
        **kwargs)
def senet28(**kwargs):
    """
    SENet-28 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(
        blocks=28,
        model_name="senet28",
        **kwargs)
def senet40(**kwargs):
    """
    SENet-40 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(
        blocks=40,
        model_name="senet40",
        **kwargs)
def senet52(**kwargs):
    """
    SENet-52 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(
        blocks=52,
        model_name="senet52",
        **kwargs)
def senet103(**kwargs):
    """
    SENet-103 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(
        blocks=103,
        model_name="senet103",
        **kwargs)
def senet154(**kwargs):
    """
    SENet-154 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_senet(
        blocks=154,
        model_name="senet154",
        **kwargs)
def _test():
    """
    Smoke test: build every SENet variant, check its trainable-parameter
    count, and push one zero-filled image through a TF1 session.
    """
    import numpy as np
    # Test configuration.
    data_format = "channels_last"
    pretrained = False
    models = [
        senet16,
        senet28,
        senet40,
        senet52,
        senet103,
        senet154,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Input placeholder; layout depends on the chosen data format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable weights in the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != senet16 or weight_count == 31366168)
        assert (model != senet28 or weight_count == 36453768)
        assert (model != senet40 or weight_count == 41541368)
        assert (model != senet52 or weight_count == 44659416)
        assert (model != senet103 or weight_count == 60963096)
        assert (model != senet154 or weight_count == 115088984)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # A single zero image is enough to verify the output shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so variables don't accumulate across models.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 16,887 | 28.16753 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/shufflenet.py | """
ShuffleNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083.
"""
__all__ = ['ShuffleNet', 'shufflenet_g1_w1', 'shufflenet_g2_w1', 'shufflenet_g3_w1', 'shufflenet_g4_w1',
'shufflenet_g8_w1', 'shufflenet_g1_w3d4', 'shufflenet_g3_w3d4', 'shufflenet_g1_wd2', 'shufflenet_g3_wd2',
'shufflenet_g1_wd4', 'shufflenet_g3_wd4']
import os
import tensorflow as tf
from .common import conv1x1, conv3x3, depthwise_conv3x3, batchnorm, channel_shuffle, maxpool2d, avgpool2d,\
is_channels_first, get_channel_axis, flatten
def shuffle_unit(x,
                 in_channels,
                 out_channels,
                 groups,
                 downsample,
                 ignore_group,
                 training,
                 data_format,
                 name="shuffle_unit"):
    """
    ShuffleNet unit: grouped 1x1 compress conv, channel shuffle, depthwise
    3x3 conv, grouped 1x1 expand conv, and a residual (or pooled-concat)
    shortcut.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    groups : int
        Number of groups in convolution layers.
    downsample : bool
        Whether do downsample.
    ignore_group : bool
        Whether ignore group value in the first convolution layer.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = out_channels // 4
    if downsample:
        # In the downsampling case the shortcut is concatenated, so the
        # residual branch only needs to produce the remaining channels.
        out_channels -= in_channels
    shortcut = x
    y = conv1x1(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        groups=(1 if ignore_group else groups),
        data_format=data_format,
        name=name + "/compress_conv1")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/compress_bn1")
    y = tf.nn.relu(y, name=name + "/activ")
    # Mix information across the convolution groups.
    y = channel_shuffle(
        x=y,
        groups=groups,
        data_format=data_format)
    y = depthwise_conv3x3(
        x=y,
        channels=mid_channels,
        strides=(2 if downsample else 1),
        data_format=data_format,
        name=name + "/dw_conv2")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/dw_bn2")
    y = conv1x1(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        groups=groups,
        data_format=data_format,
        name=name + "/expand_conv3")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/expand_bn3")
    if downsample:
        # Average-pool the shortcut to the new resolution and concatenate.
        shortcut = avgpool2d(
            x=shortcut,
            pool_size=3,
            strides=2,
            padding=1,
            data_format=data_format,
            name=name + "/avgpool")
        y = tf.concat([y, shortcut], axis=get_channel_axis(data_format), name=name + "/concat")
    else:
        y = y + shortcut
    y = tf.nn.relu(y, name=name + "/final_activ")
    return y
def shuffle_init_block(x,
                       in_channels,
                       out_channels,
                       training,
                       data_format,
                       name="shuffle_init_block"):
    """
    ShuffleNet specific initial block: one strided 3x3 conv-BN-ReLU followed
    by max-pooling.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'shuffle_init_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Strided conv halves the spatial resolution.
    y = conv3x3(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        strides=2,
        data_format=data_format,
        name=name + "/conv")
    y = batchnorm(
        x=y,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    y = tf.nn.relu(y, name=name + "/activ")
    # 3x3/2 max-pooling halves the resolution again.
    y = maxpool2d(
        x=y,
        pool_size=3,
        strides=2,
        padding=1,
        data_format=data_format,
        name=name + "/pool")
    return y
class ShuffleNet(object):
    """
    ShuffleNet model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : int
        Number of groups in convolution layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(ShuffleNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Only the hyper-parameters are stored here; the graph is built lazily
        # when the instance is called.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.groups = groups
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        x = shuffle_init_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for stage_num, stage_channels in enumerate(self.channels, start=1):
            for unit_num, out_channels in enumerate(stage_channels, start=1):
                # Every stage downsamples at its first unit; the very first
                # unit of the network also skips grouping in its 1x1 conv.
                do_downsample = (unit_num == 1)
                skip_group = (stage_num == 1) and (unit_num == 1)
                x = shuffle_unit(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    groups=self.groups,
                    downsample=do_downsample,
                    ignore_group=skip_group,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(stage_num, unit_num))
                in_channels = out_channels
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_shufflenet(groups,
                   width_scale,
                   model_name=None,
                   pretrained=False,
                   root=os.path.join("~", ".tensorflow", "models"),
                   **kwargs):
    """
    Create ShuffleNet model with specific parameters.
    Parameters:
    ----------
    groups : int
        Number of groups in convolution layers.
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    init_block_channels = 24
    layers = [4, 8, 4]
    # Stage widths depend on the group count (Table 1 of the paper).
    stage_channels_by_groups = {
        1: [144, 288, 576],
        2: [200, 400, 800],
        3: [240, 480, 960],
        4: [272, 544, 1088],
        8: [384, 768, 1536],
    }
    if groups not in stage_channels_by_groups:
        raise ValueError("The {} of groups is not supported".format(groups))
    channels_per_layers = stage_channels_by_groups[groups]
    # Expand each stage width into one entry per unit.
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        channels = [[int(cij * width_scale) for cij in ci] for ci in channels]
        init_block_channels = int(init_block_channels * width_scale)
    net = ShuffleNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def shufflenet_g1_w1(**kwargs):
    """
    ShuffleNet 1x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=1,
        width_scale=1.0,
        model_name="shufflenet_g1_w1",
        **kwargs)
def shufflenet_g2_w1(**kwargs):
    """
    ShuffleNet 1x (g=2) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=2,
        width_scale=1.0,
        model_name="shufflenet_g2_w1",
        **kwargs)
def shufflenet_g3_w1(**kwargs):
    """
    ShuffleNet 1x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=3,
        width_scale=1.0,
        model_name="shufflenet_g3_w1",
        **kwargs)
def shufflenet_g4_w1(**kwargs):
    """
    ShuffleNet 1x (g=4) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=4,
        width_scale=1.0,
        model_name="shufflenet_g4_w1",
        **kwargs)
def shufflenet_g8_w1(**kwargs):
    """
    ShuffleNet 1x (g=8) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=8,
        width_scale=1.0,
        model_name="shufflenet_g8_w1",
        **kwargs)
def shufflenet_g1_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=1,
        width_scale=0.75,
        model_name="shufflenet_g1_w3d4",
        **kwargs)
def shufflenet_g3_w3d4(**kwargs):
    """
    ShuffleNet 0.75x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=3,
        width_scale=0.75,
        model_name="shufflenet_g3_w3d4",
        **kwargs)
def shufflenet_g1_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=1,
        width_scale=0.5,
        model_name="shufflenet_g1_wd2",
        **kwargs)
def shufflenet_g3_wd2(**kwargs):
    """
    ShuffleNet 0.5x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=3,
        width_scale=0.5,
        model_name="shufflenet_g3_wd2",
        **kwargs)
def shufflenet_g1_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=1) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=1,
        width_scale=0.25,
        model_name="shufflenet_g1_wd4",
        **kwargs)
def shufflenet_g3_wd4(**kwargs):
    """
    ShuffleNet 0.25x (g=3) model from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile
    Devices,' https://arxiv.org/abs/1707.01083.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_shufflenet(
        groups=3,
        width_scale=0.25,
        model_name="shufflenet_g3_wd4",
        **kwargs)
def _test():
    """
    Smoke test: build every ShuffleNet variant, check its trainable-parameter
    count, and push one zero-filled image through a TF1 session.
    """
    import numpy as np
    # Test configuration.
    data_format = "channels_last"
    pretrained = False
    models = [
        shufflenet_g1_w1,
        shufflenet_g2_w1,
        shufflenet_g3_w1,
        shufflenet_g4_w1,
        shufflenet_g8_w1,
        shufflenet_g1_w3d4,
        shufflenet_g3_w3d4,
        shufflenet_g1_wd2,
        shufflenet_g3_wd2,
        shufflenet_g1_wd4,
        shufflenet_g3_wd4,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        # Input placeholder; layout depends on the chosen data format.
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable weights in the freshly built graph.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != shufflenet_g1_w1 or weight_count == 1531936)
        assert (model != shufflenet_g2_w1 or weight_count == 1733848)
        assert (model != shufflenet_g3_w1 or weight_count == 1865728)
        assert (model != shufflenet_g4_w1 or weight_count == 1968344)
        assert (model != shufflenet_g8_w1 or weight_count == 2434768)
        assert (model != shufflenet_g1_w3d4 or weight_count == 975214)
        assert (model != shufflenet_g3_w3d4 or weight_count == 1238266)
        assert (model != shufflenet_g1_wd2 or weight_count == 534484)
        assert (model != shufflenet_g3_wd2 or weight_count == 718324)
        assert (model != shufflenet_g1_wd4 or weight_count == 209746)
        assert (model != shufflenet_g3_wd4 or weight_count == 305902)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            # A single zero image is enough to verify the output shape.
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so variables don't accumulate across models.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 19,344 | 30.151369 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/common.py | """
Common routines for models in TensorFlow.
"""
__all__ = ['round_channels', 'hswish', 'is_channels_first', 'get_channel_axis', 'flatten', 'batchnorm', 'maxpool2d',
'avgpool2d', 'conv2d', 'conv1x1', 'conv3x3', 'depthwise_conv3x3', 'conv_block', 'conv1x1_block',
'conv3x3_block', 'conv7x7_block', 'dwconv3x3_block', 'dwconv5x5_block', 'pre_conv_block',
'pre_conv1x1_block', 'pre_conv3x3_block', 'se_block', 'channel_shuffle', 'channel_shuffle2']
import math
import numpy as np
import tensorflow as tf
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()
def round_channels(channels,
                   divisor=8):
    """
    Round a weighted channel count to the nearest multiple of `divisor`
    (the classic "make divisible" operation).
    Parameters:
    ----------
    channels : int or float
        Original number of channels.
    divisor : int, default 8
        Alignment value.
    Returns:
    -------
    int
        Weighted number of channels.
    """
    quantized = max(int(channels + divisor / 2.0) // divisor * divisor, divisor)
    # Never shrink the width by more than 10%; step up one divisor if we would.
    if quantized < 0.9 * channels:
        quantized += divisor
    return quantized
def hsigmoid(x,
             name="hsigmoid"):
    """
    Hard sigmoid: piecewise-linear approximation of the sigmoid from
    'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    name : str, default 'hsigmoid'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    clipped = tf.nn.relu6(x + 3.0, name=name)
    return clipped / 6.0
def hswish(x,
           name="hswish"):
    """
    H-Swish activation (x * hard_sigmoid(x)) from 'Searching for MobileNetV3,'
    https://arxiv.org/abs/1905.02244.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    name : str, default 'hswish'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    gate = tf.nn.relu6(x + 3.0, name=name)
    return x * gate / 6.0
def get_activation_layer(x,
                         activation,
                         name="activ"):
    """
    Apply an activation given either a callable or one of the supported
    string names ('relu', 'relu6', 'hswish').
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    activation : function or str
        Activation function or name of activation function.
    name : str, default 'activ'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    Raises:
    ------
    NotImplementedError
        If a string name is not one of the supported activations.
    """
    assert (activation is not None)
    if not isinstance(activation, str):
        # Arbitrary callable: apply directly, ignoring `name`.
        return activation(x)
    if activation == "relu":
        return tf.nn.relu(x, name=name)
    if activation == "relu6":
        return tf.nn.relu6(x, name=name)
    if activation == "hswish":
        return hswish(x, name=name)
    raise NotImplementedError()
def is_channels_first(data_format):
    """
    Tell whether the given data format is channels-first (NCHW).
    Parameters:
    ----------
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    bool
        True iff `data_format` equals 'channels_first'.
    """
    return "channels_first" == data_format
def get_channel_axis(data_format):
    """
    Return the channel axis index for the given data format.
    Parameters:
    ----------
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    int
        1 for channels-first (NCHW), -1 otherwise (NHWC).
    """
    # Inlined is_channels_first() check: channels sit at axis 1 for NCHW,
    # at the last axis for everything else.
    return 1 if data_format == "channels_first" else -1
def flatten(x,
            data_format):
    """
    Flatten the input to two dimensions (batch, features).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if not is_channels_first(data_format):
        # Move channels first so the flattened feature order matches NCHW layout.
        x = tf.transpose(x, perm=(0, 3, 1, 2))
    feature_dim = np.prod(x.get_shape().as_list()[1:])
    return tf.reshape(x, shape=(-1, feature_dim))
def batchnorm(x,
              momentum=0.9,
              epsilon=1e-5,
              training=False,
              data_format="channels_last",
              name=None):
    """
    Batch normalization layer wrapper around tf.keras.layers.BatchNormalization.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    bn_layer = tf.keras.layers.BatchNormalization(
        axis=get_channel_axis(data_format),
        momentum=momentum,
        epsilon=epsilon,
        name=name)
    return bn_layer(inputs=x, training=training)
def maxpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              data_format="channels_last",
              name=None):
    """
    Max pooling operation for two dimensional (spatial) data, with explicit
    padding and optional ceil-mode output-size rounding.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for the pooling layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if ceil_mode:
        # Bug fix: spatial dims are at axes (2, 3) only for channels_first;
        # for channels_last they are at axes (1, 2). The previous code read
        # (width, channels) under channels_last.
        if is_channels_first(data_format):
            height, width = int(x.shape[2]), int(x.shape[3])
        else:
            height, width = int(x.shape[1]), int(x.shape[2])
        # If the output size is fractional, add one extra pixel of padding on
        # that axis so the last (partial) window is still covered.
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            padding = (padding[0] + 1, padding[1])
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)
    if (padding[0] > 0) or (padding[1] > 0):
        # REFLECT padding keeps the padded border made of real activations, so
        # the max never picks up artificial fill values.
        if is_channels_first(data_format):
            x = tf.pad(x, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2], mode="REFLECT")
        else:
            x = tf.pad(x, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]], mode="REFLECT")
    x = tf.keras.layers.MaxPooling2D(
        pool_size=pool_size,
        strides=strides,
        padding="valid",
        data_format=data_format,
        name=name)(x)
    return x
def avgpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              data_format="channels_last",
              name=None):
    """
    Average pooling operation for two dimensional (spatial) data, with
    explicit zero padding and optional ceil-mode output-size rounding.
    Implemented as a stride-1 average pool followed by a 1x1 strided pool,
    so padded zeros are included in the averages (count_include_pad-style).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the average pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for the pooling layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default None
        Layer name. NOTE(review): must not be None when strides > 1, since the
        second stage derives its name from it.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if ceil_mode:
        # Bug fix: spatial dims are at axes (2, 3) only for channels_first;
        # for channels_last they are at axes (1, 2). The previous code read
        # (width, channels) under channels_last.
        if is_channels_first(data_format):
            height, width = int(x.shape[2]), int(x.shape[3])
        else:
            height, width = int(x.shape[1]), int(x.shape[2])
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            padding = (padding[0] + 1, padding[1])
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            padding = (padding[0], padding[1] + 1)
    if (padding[0] > 0) or (padding[1] > 0):
        # Zero (CONSTANT) padding: padded values participate in the averages.
        if is_channels_first(data_format):
            x = tf.pad(x, [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2], mode="CONSTANT")
        else:
            x = tf.pad(x, [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]], mode="CONSTANT")
    x = tf.keras.layers.AveragePooling2D(
        pool_size=pool_size,
        strides=1,
        padding="valid",
        data_format=data_format,
        name=name)(x)
    if (strides[0] > 1) or (strides[1] > 1):
        # Subsample the stride-1 result with a 1x1 pool to realize the stride.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=1,
            strides=strides,
            padding="valid",
            data_format=data_format,
            name=name + "/stride")(x)
    return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           dilation=1,
           groups=1,
           use_bias=True,
           data_format="channels_last",
           name="conv2d"):
    """
    Convolution 2D layer wrapper supporting ordinary, depthwise
    (groups == in_channels == out_channels) and grouped convolution.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector. (Docstring previously said the
        default was False, contradicting the signature.)
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv2d'
        Layer name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if isinstance(dilation, int):
        dilation = (dilation, dilation)
    # Explicit symmetric padding; the inner convolutions always run "valid".
    if (padding[0] > 0) or (padding[1] > 0):
        if is_channels_first(data_format):
            paddings_tf = [[0, 0], [0, 0], [padding[0]] * 2, [padding[1]] * 2]
        else:
            paddings_tf = [[0, 0], [padding[0]] * 2, [padding[1]] * 2, [0, 0]]
        x = tf.pad(x, paddings=paddings_tf)
    if groups == 1:
        # Plain convolution.
        x = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding="valid",
            data_format=data_format,
            dilation_rate=dilation,
            use_bias=use_bias,
            kernel_initializer=tf.keras.initializers.VarianceScaling(2.0),
            name=name)(x)
    elif (groups == out_channels) and (out_channels == in_channels):
        # Depthwise convolution (one filter per channel).
        assert (dilation[0] == 1) and (dilation[1] == 1)
        kernel = tf.compat.v1.get_variable(
            name=name + "/dw_kernel",
            shape=kernel_size + (in_channels, 1),
            initializer=tf.keras.initializers.VarianceScaling(2.0))
        x = tf.nn.depthwise_conv2d(
            input=x,
            filter=kernel,
            strides=(1, 1) + strides if is_channels_first(data_format) else (1,) + strides + (1,),
            padding="VALID",
            rate=(1, 1),
            name=name,
            data_format="NCHW" if is_channels_first(data_format) else "NHWC")
        if use_bias:
            raise NotImplementedError
    else:
        # Generic grouped convolution: slice channels, convolve each group,
        # concatenate along the channel axis.
        assert (in_channels % groups == 0)
        assert (out_channels % groups == 0)
        in_group_channels = in_channels // groups
        out_group_channels = out_channels // groups
        group_list = []
        for gi in range(groups):
            if is_channels_first(data_format):
                xi = x[:, gi * in_group_channels:(gi + 1) * in_group_channels, :, :]
            else:
                xi = x[:, :, :, gi * in_group_channels:(gi + 1) * in_group_channels]
            xi = tf.keras.layers.Conv2D(
                filters=out_group_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding="valid",
                data_format=data_format,
                dilation_rate=dilation,
                use_bias=use_bias,
                # Fix: tf.contrib was removed in TF >= 2.0 (this file already
                # uses tf.compat.v1); use the same initializer as the other
                # branches for consistency.
                kernel_initializer=tf.keras.initializers.VarianceScaling(2.0),
                name=name + "/convgroup{}".format(gi + 1))(xi)
            group_list.append(xi)
        x = tf.concat(group_list, axis=get_channel_axis(data_format), name=name + "/concat")
    return x
def conv1x1(x,
            in_channels,
            out_channels,
            strides=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            name="conv1x1"):
    """
    Pointwise (1x1) convolution layer; thin wrapper over conv2d.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1'
        Layer name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv2d(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, strides=strides, groups=groups,
        use_bias=use_bias, data_format=data_format, name=name)
def conv3x3(x,
            in_channels,
            out_channels,
            strides=1,
            padding=1,
            groups=1,
            use_bias=False,
            data_format="channels_last",
            name="conv3x3"):
    """
    3x3 convolution layer; thin wrapper over conv2d.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv2d(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=3, strides=strides, padding=padding, groups=groups,
        use_bias=use_bias, data_format=data_format, name=name)
def depthwise_conv3x3(x,
                      channels,
                      strides,
                      data_format="channels_last",
                      name="depthwise_conv3x3"):
    """
    Depthwise 3x3 convolution (groups == channels, no bias); wrapper over conv2d.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    channels : int
        Number of input/output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'depthwise_conv3x3'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv2d(
        x=x, in_channels=channels, out_channels=channels,
        kernel_size=3, strides=strides, padding=1, groups=channels,
        use_bias=False, data_format=data_format, name=name)
def conv_block(x,
               in_channels,
               out_channels,
               kernel_size,
               strides,
               padding,
               dilation=1,
               groups=1,
               use_bias=False,
               use_bn=True,
               activation="relu",
               training=False,
               data_format="channels_last",
               name="conv_block"):
    """
    Standard Conv -> (BatchNorm) -> (activation) block.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function; None disables it.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        data_format=data_format,
        name=name + "/conv")
    if use_bn:
        out = batchnorm(
            x=out,
            training=training,
            data_format=data_format,
            name=name + "/bn")
    if activation is not None:
        out = get_activation_layer(
            x=out,
            activation=activation,
            name=name + "/activ")
    return out
def conv1x1_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  groups=1,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv1x1_block"):
    """
    1x1 version of the standard convolution block (kernel 1, padding 0).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv1x1_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, strides=strides, padding=0, groups=groups,
        use_bias=use_bias, activation=activation, training=training,
        data_format=data_format, name=name)
def conv3x3_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=1,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv3x3_block"):
    """
    3x3 version of the standard convolution block (kernel 3, default padding 1).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=3, strides=strides, padding=padding, dilation=dilation,
        groups=groups, use_bias=use_bias, use_bn=use_bn,
        activation=activation, training=training,
        data_format=data_format, name=name)
def conv5x5_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=2,
                  dilation=1,
                  groups=1,
                  use_bias=False,
                  use_bn=True,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv3x3_block"):
    """
    5x5 version of the standard convolution block (kernel 5, default padding 2).
    Generalized to accept `use_bn`, matching the sibling conv3x3_block; the
    previous version always used BatchNorm with no way to disable it.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    use_bn : bool, default True
        Whether to use BatchNorm layer.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv3x3_block'
        Block name. NOTE(review): the default is a copy-paste of the 3x3
        variant; kept unchanged because it determines variable scopes and
        changing it would break existing checkpoints.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=5,
        strides=strides,
        padding=padding,
        dilation=dilation,
        groups=groups,
        use_bias=use_bias,
        use_bn=use_bn,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def conv7x7_block(x,
                  in_channels,
                  out_channels,
                  strides=1,
                  padding=3,
                  use_bias=False,
                  activation="relu",
                  training=False,
                  data_format="channels_last",
                  name="conv7x7_block"):
    """
    7x7 version of the standard convolution block (kernel 7, default padding 3).
    (The summary previously said "3x3 version" — copy-paste error.)
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 3
        Padding value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'conv7x7_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=7,
        strides=strides,
        padding=padding,
        use_bias=use_bias,
        activation=activation,
        training=training,
        data_format=data_format,
        name=name)
def dwconv3x3_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=1,
                    dilation=1,
                    use_bias=False,
                    activation="relu",
                    training=False,
                    data_format="channels_last",
                    name="dwconv3x3_block"):
    """
    Depthwise 3x3 convolution block (conv3x3_block with groups == out_channels).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 1
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv3x3_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=strides, padding=padding, dilation=dilation,
        groups=out_channels, use_bias=use_bias, activation=activation,
        training=training, data_format=data_format, name=name)
def dwconv5x5_block(x,
                    in_channels,
                    out_channels,
                    strides=1,
                    padding=2,
                    dilation=1,
                    use_bias=False,
                    activation="relu",
                    training=False,
                    data_format="channels_last",
                    name="dwconv3x3_block"):
    """
    Depthwise 5x5 convolution block (conv5x5_block with groups == out_channels).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    padding : int or tuple/list of 2 int, default 2
        Padding value for convolution layer.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    use_bias : bool, default False
        Whether the layer uses a bias vector.
    activation : function or str or None, default 'relu'
        Activation function or name of activation function.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'dwconv3x3_block'
        Block name. NOTE(review): default kept from the 3x3 variant (upstream
        copy-paste); unchanged since it defines variable scopes of checkpoints.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return conv5x5_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        strides=strides, padding=padding, dilation=dilation,
        groups=out_channels, use_bias=use_bias, activation=activation,
        training=training, data_format=data_format, name=name)
def pre_conv_block(x,
                   in_channels,
                   out_channels,
                   kernel_size,
                   strides,
                   padding,
                   return_preact=False,
                   training=False,
                   data_format="channels_last",
                   name="pre_conv_block"):
    """
    Pre-activation convolution block: BatchNorm -> ReLU -> Conv (no bias).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet).
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv_block'
        Block name.
    Returns:
    -------
    Tensor or tuple of two Tensors
        Resulted tensor (plus the pre-activated tensor when requested).
    """
    x = batchnorm(
        x=x,
        training=training,
        data_format=data_format,
        name=name + "/bn")
    x = tf.nn.relu(x, name=name + "/activ")
    # Snapshot after BN+ReLU but before the convolution.
    x_pre_activ = x if return_preact else None
    x = conv2d(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=False,
        data_format=data_format,
        name=name + "/conv")
    return (x, x_pre_activ) if return_preact else x
def pre_conv1x1_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      training=False,
                      data_format="channels_last",
                      name="pre_conv1x1_block"):
    """
    1x1 version of the pre-activated convolution block (kernel 1, padding 0).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet).
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv1x1_block'
        Block name.
    Returns:
    -------
    Tensor or tuple of two Tensors
        Resulted tensor (plus the pre-activated tensor when requested).
    """
    return pre_conv_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=1, strides=strides, padding=0,
        return_preact=return_preact, training=training,
        data_format=data_format, name=name)
def pre_conv3x3_block(x,
                      in_channels,
                      out_channels,
                      strides=1,
                      return_preact=False,
                      training=False,
                      data_format="channels_last",
                      name="pre_conv3x3_block"):
    """
    3x3 version of the pre-activated convolution block (kernel 3, padding 1).
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    return_preact : bool, default False
        Whether to also return the pre-activated tensor (used by PreResNet).
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'pre_conv3x3_block'
        Block name.
    Returns:
    -------
    Tensor or tuple of two Tensors
        Resulted tensor (plus the pre-activated tensor when requested).
    """
    return pre_conv_block(
        x=x, in_channels=in_channels, out_channels=out_channels,
        kernel_size=3, strides=strides, padding=1,
        return_preact=return_preact, training=training,
        data_format=data_format, name=name)
def channel_shuffle(x,
                    groups,
                    data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Reshapes channels to (groups, channels_per_group), transposes the two, and
    flattens back, interleaving channels across groups.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups; must divide the channel count.
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    static_shape = x.get_shape().as_list()
    if is_channels_first(data_format):
        channels, height, width = static_shape[1], static_shape[2], static_shape[3]
    else:
        height, width, channels = static_shape[1], static_shape[2], static_shape[3]
    assert (channels % groups == 0)
    cpg = channels // groups
    if is_channels_first(data_format):
        x = tf.reshape(x, shape=(-1, groups, cpg, height, width))
        x = tf.transpose(x, perm=(0, 2, 1, 3, 4))
        x = tf.reshape(x, shape=(-1, channels, height, width))
    else:
        x = tf.reshape(x, shape=(-1, height, width, groups, cpg))
        x = tf.transpose(x, perm=(0, 1, 2, 4, 3))
        x = tf.reshape(x, shape=(-1, height, width, channels))
    return x
def channel_shuffle2(x,
                     groups,
                     data_format):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
    https://arxiv.org/abs/1707.01083.
    Alternative version: reshapes to (channels_per_group, groups) — i.e. the
    factor order is swapped relative to channel_shuffle — before transposing.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    groups : int
        Number of groups; must divide the channel count.
    data_format : str
        The ordering of the dimensions in tensors.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    static_shape = x.get_shape().as_list()
    if is_channels_first(data_format):
        channels, height, width = static_shape[1], static_shape[2], static_shape[3]
    else:
        height, width, channels = static_shape[1], static_shape[2], static_shape[3]
    assert (channels % groups == 0)
    cpg = channels // groups
    if is_channels_first(data_format):
        x = tf.reshape(x, shape=(-1, cpg, groups, height, width))
        x = tf.transpose(x, perm=(0, 2, 1, 3, 4))
        x = tf.reshape(x, shape=(-1, channels, height, width))
    else:
        x = tf.reshape(x, shape=(-1, height, width, cpg, groups))
        x = tf.transpose(x, perm=(0, 1, 2, 4, 3))
        x = tf.reshape(x, shape=(-1, height, width, channels))
    return x
def se_block(x,
             channels,
             reduction=16,
             approx_sigmoid=False,
             round_mid=False,
             activation="relu",
             data_format="channels_last",
             name="se_block"):
    """
    Squeeze-and-Excitation block from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Squeezes spatial dims via global average pooling, runs a two-layer 1x1
    bottleneck, and rescales the input channel-wise by the resulting gate.
    Parameters:
    ----------
    x : Tensor
        Input tensor (must be 4D).
    channels : int
        Number of channels.
    reduction : int, default 16
        Squeeze reduction value.
    approx_sigmoid : bool, default False
        Whether to use the approximated (hard) sigmoid for the gate.
    round_mid : bool, default False
        Whether to round middle channel number (make divisible by 8).
    activation : function or str, default 'relu'
        Activation between the two 1x1 convolutions.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    name : str, default 'se_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (len(x.shape) == 4)
    if round_mid:
        mid_channels = round_channels(float(channels) / reduction)
    else:
        mid_channels = channels // reduction
    # Global spatial average ("squeeze") as a full-size average pooling.
    spatial_size = x.shape[2:4] if is_channels_first(data_format) else x.shape[1:3]
    attn = tf.keras.layers.AveragePooling2D(
        pool_size=spatial_size,
        strides=1,
        data_format=data_format,
        name=name + "/pool")(x)
    attn = conv1x1(
        x=attn,
        in_channels=channels,
        out_channels=mid_channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv1/conv")
    attn = get_activation_layer(
        x=attn,
        activation=activation,
        name=name + "/activ")
    attn = conv1x1(
        x=attn,
        in_channels=mid_channels,
        out_channels=channels,
        use_bias=True,
        data_format=data_format,
        name=name + "/conv2/conv")
    if approx_sigmoid:
        attn = hsigmoid(attn, name=name + "/hsigmoid")
    else:
        attn = tf.nn.sigmoid(attn, name=name + "/sigmoid")
    return x * attn
| 39,625 | 28.265879 | 120 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/model_store.py | """
Model store which provides pretrained models.
"""
__all__ = ['get_model_file', 'load_state_dict', 'download_state_dict', 'init_variables_from_state_dict']
import os
import zipfile
import logging
import hashlib
# Registry of published pretrained checkpoints. Maps model name to a tuple of
# (error string embedded in the weight-file name - presumably a top-1/top-5
# error figure, TODO confirm; SHA-1 of the unpacked weight file; GitHub
# release tag hosting the zipped archive). Consumed by get_model_file().
_model_sha1 = {name: (error, checksum, repo_release_tag) for name, error, checksum, repo_release_tag in [
    ('alexnet', '1788', 'd3cd2a5a7dfb882c47153b5abffc2edfe8335838', 'v0.0.394'),
    ('alexnetb', '1853', '58a51cd1803929c52eed6e1c69a43328fcc1d1cb', 'v0.0.384'),
    ('zfnet', '1715', 'a18747efbec5ce849244a540651228e287d13296', 'v0.0.395'),
    ('zfnetb', '1482', '2624da317b57bec7d3ce73e7548caa6eca0a6ad6', 'v0.0.400'),
    ('vgg11', '1015', 'b87e9dbcbab308f671a69ef3ed67067e62c5429f', 'v0.0.381'),
    ('vgg13', '0946', 'f1411e1fdd5e75919d4e1a0f7b33ac06e0acb146', 'v0.0.388'),
    ('vgg16', '0830', 'e63ead2e896e1a5185840b8cc0973d0791b19d35', 'v0.0.401'),
    ('vgg19', '0768', 'cf2a33c6221e44432f38dd8bdcf1312efd208ae8', 'v0.0.420'),
    ('bn_vgg11', '0936', '4ff8667b2daba34bb4531744a50c000543e4e524', 'v0.0.339'),
    ('bn_vgg13', '0888', '0a49f8714fa647684940feb6cffb0a468290a5af', 'v0.0.353'),
    ('bn_vgg16', '0755', '9948c82dcb133081796d095ebd5cf8815d77f7d1', 'v0.0.359'),
    ('bn_vgg19', '0689', '8a3197c6a27aa4a271e31610ff6cc58c6b593a81', 'v0.0.360'),
    ('bn_vgg11b', '0979', '6a3890a42d0b7bab962582298ddaf6077eedf22d', 'v0.0.407'),
    ('bn_vgg13b', '1015', '999e47a6a5d4cb493d1af3e31de04d67d25176e8', 'v0.0.123'),
    ('bn_vgg16b', '0866', '1f8251aa987151e89a82f0f209c2a8bbde0f0c47', 'v0.0.123'),
    ('bn_vgg19b', '0817', '784e4c396e6de685727bc8fd30f5ed35c66a84a0', 'v0.0.123'),
    ('resnet10', '1390', '7fff13aee28ba7601c155907be64aff344530736', 'v0.0.248'),
    ('resnet12', '1300', '9539494f8fae66c454efed4e5abf26c273d3b285', 'v0.0.253'),
    ('resnet14', '1225', 'd1fb0f762258c9fd04a3ad469462f732333740fa', 'v0.0.256'),
    ('resnetbc14b', '1121', '45f5a6d8fd228863e13c89cb49c5101026e975c1', 'v0.0.309'),
    ('resnet16', '1086', '5ac8e7da9dcee268db9b7cf4ecfeceb8efca3005', 'v0.0.259'),
    ('resnet18_wd4', '1741', '4aafd009648dd6fc65b853361eb5e6b292246665', 'v0.0.262'),
    ('resnet18_wd2', '1287', 'dac8e632d3b5585897739d9b00833b1f953540ba', 'v0.0.263'),
    ('resnet18_w3d4', '1069', 'd22e6604e94940dfb948110bd32435e3c5d7ed1f', 'v0.0.266'),
    ('resnet18', '0956', 'b4fc7198d9bbcf6699b904824c839943871401bc', 'v0.0.153'),
    ('resnet26', '0838', 'f647811d7211d82344419b1590fb3ae73433efa7', 'v0.0.305'),
    ('resnetbc26b', '0757', '55c88013263af95b0d391069e34542a5d899cc7d', 'v0.0.313'),
    ('resnet34', '0742', '8faa0ab2cbb8ff4ad3bb62aa82da3dd1eb3ef05d', 'v0.0.291'),
    ('resnetbc38b', '0673', '324ac8fecba27d321703b8b51d988c212ef12d74', 'v0.0.328'),
    ('resnet50', '0605', '34177a2e963820ae5ee9c7b2bd233a2566928774', 'v0.0.329'),
    ('resnet50b', '0609', '4b68417369140303594ae69d5ac5891e9fe91267', 'v0.0.308'),
    ('resnet101', '0601', '3fc260bc67ab133b39f087862f5bc70cf6aa9442', 'v0.0.72'),
    ('resnet101b', '0507', '527dca370eb8a2a4a25025993f8ccce35b00c9ef', 'v0.0.357'),
    ('resnet152', '0535', 'b21844fcaea4e14a91fa17bfa870a3d056d258ea', 'v0.0.144'),
    ('resnet152b', '0485', '36964f4867125dd08fa722d4d639273d7d1874e1', 'v0.0.378'),
    ('preresnet10', '1401', '3a2eed3b9254d35ba546c9894cf9cc3c6d88aa5c', 'v0.0.249'),
    ('preresnet12', '1321', '0c424c407bd91c5135ec74660b5f001f07cec0df', 'v0.0.257'),
    ('preresnet14', '1216', 'fda0747fd40cad58e46dad53e68d3d06b8829829', 'v0.0.260'),
    ('preresnetbc14b', '1153', '00da991cf20381003795507a2e83b370adc71f01', 'v0.0.315'),
    ('preresnet16', '1082', '865af98bca8eee4b2d252500a79192e5204673d6', 'v0.0.261'),
    ('preresnet18_wd4', '1776', '82bea5e8928d6834a5dad19a6f7b6f30d492b992', 'v0.0.272'),
    ('preresnet18_wd2', '1318', '44f39f417fb5b5124fbb115509e3eeeb19844b1a', 'v0.0.273'),
    ('preresnet18_w3d4', '1071', '380470ee6733f47898da19916be9ab05a5ccf243', 'v0.0.274'),
    ('preresnet18', '0949', '692e6c11e738c11eaf818d60a214e7a905a873c1', 'v0.0.140'),
    ('preresnet26', '0833', '8de37e08f3c2dd054a1dc4099d4b398097999af6', 'v0.0.316'),
    ('preresnetbc26b', '0789', '993dd84a36d8f1417e2f5454ec5f3b3159f251c1', 'v0.0.325'),
    ('preresnet34', '0754', '9d5635846928420d41f7304e02a4d33160af45e7', 'v0.0.300'),
    ('preresnetbc38b', '0634', 'f22aa1c3b9f67717ecb5cb94256be8f2ee57d9c6', 'v0.0.348'),
    ('preresnet50', '0625', '06130b124a1abf96cc92f7d212ca9b524da02ddd', 'v0.0.330'),
    ('preresnet50b', '0631', '9fc00073139d763ef08e2fc810c2469c9b0182c9', 'v0.0.307'),
    ('preresnet101', '0572', 'cd61594e9e2fb758ca69a38baf31223351638c4f', 'v0.0.73'),
    ('preresnet101b', '0539', 'c0b9e129908051592393ba5e7939a4feb5b82b6c', 'v0.0.351'),
    ('preresnet152', '0529', 'b761f286ab284b916f388cc5d6af00e5ea049081', 'v0.0.73'),
    ('preresnet152b', '0500', '7ae9df4bbabbc12d32a35f4369d64269ba3c8e7b', 'v0.0.386'),
    ('preresnet200b', '0560', '881e0e2869428d89831bde0c7da219ed69236f16', 'v0.0.73'),
    ('preresnet269b', '0555', 'c799eaf246d3dccf72ac10cdec3f35bd8bf72e71', 'v0.0.239'),
    ('resnext14_16x4d', '1224', '3f603dde73c4581f60ada40499ed42d800847268', 'v0.0.370'),
    ('resnext14_32x2d', '1246', 'df7d6b8a824796742a0bb369d654135cd109dfb3', 'v0.0.371'),
    ('resnext14_32x4d', '1113', 'cac0dad52d391f9268c9fee6f95be59e14952fcc', 'v0.0.327'),
    ('resnext26_32x2d', '0849', '2dee5d79b8f093f1f6d1cf87b7f36e0481c6648f', 'v0.0.373'),
    ('resnext26_32x4d', '0717', '594567d27cea1f5e324a6ecfd93f209d30c148d9', 'v0.0.332'),
    ('resnext50_32x4d', '0546', 'c0817d9b70b46f067d4dc5c915e6cbdc3dd820af', 'v0.0.417'),
    ('resnext101_32x4d', '0493', 'de52ea63f204c839c176f6162ae73a19a33626c4', 'v0.0.417'),
    ('resnext101_64x4d', '0485', 'ddff97a9e6aa2ccd603a067c2158044cec8b8342', 'v0.0.417'),
    ('seresnet10', '1336', 'd4a0a9d3e2e2b4188aac06d3b6cc4132f05ac916', 'v0.0.354'),
    ('seresnet18', '0923', '7aa519d2ec4c721c61a9fd04a9b0ca745f12b24a', 'v0.0.355'),
    ('seresnet26', '0809', 'b2a8b74fe11edbfa798c35d882d6ebd5cfceb6ff', 'v0.0.363'),
    ('seresnetbc26b', '0681', '692ccde37b4dc19df0bc5b92256f0023228baf98', 'v0.0.366'),
    ('seresnetbc38b', '0578', '2d787dc45bd6775fa96b4048ab5ac191089a0ab0', 'v0.0.374'),
    ('seresnet50', '0643', 'e022e5b9e58e19c692d00394c85daa57ea943b82', 'v0.0.75'),
    ('seresnet50b', '0533', '539e58be15125cf7693cc6318d99592a2f956d48', 'v0.0.387'),
    ('seresnet101', '0589', '305d23018de942b25df59d8ac9d2dd14374d7d28', 'v0.0.75'),
    ('seresnet152', '0578', 'd06ab6d909129693da68c552b91f3f344795114f', 'v0.0.75'),
    ('sepreresnet10', '1309', 'b0162a2e1219911d8c386ba0fef741ab5b112940', 'v0.0.377'),
    ('sepreresnet18', '0941', '5606cb354b61974a97ae81807194638fc3ea0576', 'v0.0.380'),
    ('sepreresnetbc26b', '0634', 'd903397d7afafbf43b5c19da927601a128cafd0b', 'v0.0.399'),
    ('sepreresnetbc38b', '0564', '262a4a2e23d34ab244b107fe8397e776744e4fcb', 'v0.0.409'),
    ('seresnext50_32x4d', '0507', '982a4cb8190a4e7bb21d4582336c13d8363c4ece', 'v0.0.418'),
    ('seresnext101_32x4d', '0461', 'b84ec20adb9d67f56ac4cd6eb35b134f964c1936', 'v0.0.418'),
    ('seresnext101_64x4d', '0465', 'b16029e686fb50fd64ed59df22c9d3c5ed0470c1', 'v0.0.418'),
    ('senet16', '0803', '366c58ce2f47ded548734cf336d46b50517c78c4', 'v0.0.341'),
    ('senet28', '0594', '98ba8cc2068495fe192af40328f1838b1e835b6f', 'v0.0.356'),
    ('senet154', '0463', 'c86eaaed79c696a32ace4a8576fc0b50f0f93900', 'v0.0.86'),
    ('densenet121', '0688', 'e3bccdc5544f46352bb91671ac4cd7e2f788952b', 'v0.0.314'),
    ('densenet161', '0617', '9deca33a34a5c4a0a84f0a37920dbfd1cad85cb7', 'v0.0.77'),
    ('densenet169', '0606', 'fcbb5c869350e22cc79b15a0508f2f5598dacb90', 'v0.0.406'),
    ('densenet201', '0635', '5eda789595ba0b8b450705220704687fa8ea8788', 'v0.0.77'),
    ('darknet_tiny', '1751', '750ff8d9b17beb5ab88200aa787dfcb5b6ca8b36', 'v0.0.71'),
    ('darknet_ref', '1672', '3c8ed62a43b9e8934b4beb7c47ce4c7b2cdb7a64', 'v0.0.71'),
    ('darknet53', '0555', '49816dbf617b2cd14051c2d7cd0325ee3ebb63a2', 'v0.0.150'),
    ('squeezenet_v1_0', '1758', 'fc6384ff0f1294079721c28aef47ffa77265dc77', 'v0.0.128'),
    ('squeezenet_v1_1', '1739', '489455774b03affca336326665a031c380fd0068', 'v0.0.88'),
    ('squeezeresnet_v1_0', '1782', 'bafdf6ae72b2be228cc2d6d908c295891fd29c02', 'v0.0.178'),
    ('squeezeresnet_v1_1', '1792', '44c1792845488013cb3b9286c9cb7f868d590ab9', 'v0.0.79'),
    ('sqnxt23_w1', '2108', '6267020032ac7d6aa0905b916954864cdfea4934', 'v0.0.171'),
    ('sqnxt23v5_w1', '2077', 'ebc0c53dc0c39e72eb620b06c2eb07ba451fb28d', 'v0.0.172'),
    ('sqnxt23_w3d2', '1509', '8fbdcd6dde6a3fb2f8e8aab4d1eb828123becfb5', 'v0.0.210'),
    ('sqnxt23v5_w3d2', '1539', 'ae14d7b8685b23fcffeba96038e31255a7c718fa', 'v0.0.212'),
    ('sqnxt23_w2', '1235', 'ea1ae9b747fb40f670b32fad28844fdc2af5ea66', 'v0.0.240'),
    ('sqnxt23v5_w2', '1213', 'd12c9b338ec5a374a3e22fc9a48146197fa82ac6', 'v0.0.216'),
    ('shufflenet_g1_wd4', '3680', '3d9856357041fb69f4a6ddf0208e7821605487a9', 'v0.0.134'),
    ('shufflenet_g3_wd4', '3617', '8f00e642cfc2b7ab8b1a770513bb46190c3bcb7d', 'v0.0.135'),
    ('shufflenet_g1_wd2', '2231', 'd5356e3b04c4a30d568755807e996821098d8aae', 'v0.0.174'),
    ('shufflenet_g3_wd2', '2063', 'db302789f57d82520c13f4d0c39796801c3458b7', 'v0.0.167'),
    ('shufflenet_g1_w3d4', '1678', 'ca175843c5d78bf7d6c826142df810b1b721978b', 'v0.0.218'),
    ('shufflenet_g3_w3d4', '1613', 'f7a106be40b1cdcc68e1cf185451832aec3584fc', 'v0.0.219'),
    ('shufflenet_g1_w1', '1351', '2f36fdbc45ef00b49dd558b3b2e5b238be2e28ca', 'v0.0.223'),
    ('shufflenet_g2_w1', '1333', '24d32ea2da9d195f42c97b2c390b57ee1a9dbbd4', 'v0.0.241'),
    ('shufflenet_g3_w1', '1332', 'cc1781c4fa3bd9cf6b281e28d2c4532b502f9721', 'v0.0.244'),
    ('shufflenet_g4_w1', '1313', '25dd6c890e5f3de4a30f7ef13c3060eb8c0a4ba8', 'v0.0.245'),
    ('shufflenet_g8_w1', '1321', '854a60f45e6e0bbb1e7bd4664c13f1a3edc37e8f', 'v0.0.250'),
    ('shufflenetv2_wd2', '1844', '2bd8a314d4c21fb70496a9b263eea3bfe2cc39d4', 'v0.0.90'),
    ('shufflenetv2_w1', '1131', '6a728e21f405d52b0deade6878f4661089b47a51', 'v0.0.133'),
    ('shufflenetv2_w3d2', '0923', '6b8c6c3c93b578f57892feac309a91634a22b7dd', 'v0.0.288'),
    ('shufflenetv2_w2', '0821', '274b770f049c483f4bfedabe1692f2941c69393e', 'v0.0.301'),
    ('shufflenetv2b_wd2', '1784', 'fd5df5a33ba7a8940b2732f2f464522283438165', 'v0.0.158'),
    ('shufflenetv2b_w1', '1104', '6df32bad4c38e603dd75c89ba39c25d45162ab43', 'v0.0.161'),
    ('shufflenetv2b_w3d2', '0880', '9ce6d2b779f0f2483ffc8c8396a9c22af0ea712b', 'v0.0.203'),
    ('shufflenetv2b_w2', '0810', '164690eda8bf24de2f2835250646b8164b9de1dc', 'v0.0.242'),
    ('menet108_8x1_g3', '2032', '4e9e89e10f7bc055c83bbbb0e9f283f983546288', 'v0.0.89'),
    ('menet128_8x1_g4', '1915', '148105f444f44137b3df2d50ef63d811a9d1da82', 'v0.0.103'),
    ('menet160_8x1_g8', '2028', '7ff635d185d0228f147dc32c225da85c99763e9b', 'v0.0.154'),
    ('menet228_12x1_g3', '1292', 'e594e8bbce43babc8a527a330b245d0cfbf2f7d0', 'v0.0.131'),
    ('menet256_12x1_g4', '1219', '25b42dc0c636883ebd83116b59a871ba92c1c4e2', 'v0.0.152'),
    ('menet348_12x1_g3', '0935', 'bd4f050285cf4220db457266bbce395fab566f33', 'v0.0.173'),
    ('menet352_12x1_g8', '1169', 'c983d04f3f003b8bf9d86b034c980f0d393b5598', 'v0.0.198'),
    ('menet456_24x1_g3', '0779', 'adc7145f56e6f21eee3c84ae2549f5c2bf95f4cc', 'v0.0.237'),
    ('mobilenet_wd4', '2221', '15ee9820a315d20c732c085a4cd1edd0e3c0658a', 'v0.0.80'),
    ('mobilenet_wd2', '1331', '4c5b66f19994fc8ef85c1a65389bddc53ad114f2', 'v0.0.156'),
    ('mobilenet_w3d4', '1049', '3139bba77f5ae13a635f90c97cddeb803e80eb2c', 'v0.0.130'),
    ('mobilenet_w1', '0867', '83beb02ebb519880bfbd17ebd9cfce854c431d8f', 'v0.0.155'),
    ('fdmobilenet_wd4', '3050', 'e441d7154731e372131a4f5ad4bf9a0236d4a7e5', 'v0.0.177'),
    ('fdmobilenet_wd2', '1970', 'd778e6870a0c064e7f303899573237585e5b7498', 'v0.0.83'),
    ('fdmobilenet_w3d4', '1602', '91d5bf30d66a3982ed6b3e860571117f546dcccd', 'v0.0.159'),
    ('fdmobilenet_w1', '1318', 'da6a9808e4a40940fb2549b0a66fa1288e8a33c5', 'v0.0.162'),
    ('mobilenetv2_wd4', '2416', 'ae7e5137b9b9c01b35f16380afe7e1423541475e', 'v0.0.137'),
    ('mobilenetv2_wd2', '1446', '696501bd3e6df77a78e85756403a3da23839244b', 'v0.0.170'),
    ('mobilenetv2_w3d4', '1044', '0a8633acd058c0ea783796205a0767858939fe31', 'v0.0.230'),
    ('mobilenetv2_w1', '0862', '03daae54f799467152612138da07a8c221666d70', 'v0.0.213'),
    ('igcv3_wd4', '2835', 'b41fb3c75e090cc719962e1ca2debcbac241dc22', 'v0.0.142'),
    ('igcv3_wd2', '1705', 'de0b98d950a3892b6d15d1c3ea248d41a34adf00', 'v0.0.132'),
    ('igcv3_w3d4', '1096', 'b8650159ab15b118c0655002d9ce613b3a36dea1', 'v0.0.207'),
    ('igcv3_w1', '0903', 'a69c216fa5838dba316b01d347846812835650fe', 'v0.0.243'),
    ('mnasnet_b1', '0800', 'a21e7b11537a81d57be61b27761efa69b0b44728', 'v0.0.419'),
    ('mnasnet_a1', '0756', '2903749fb1ac67254487ccf1668cae064170ffd1', 'v0.0.419')]}
# Repository whose GitHub releases host the zipped weight archives.
imgclsmob_repo_url = 'https://github.com/osmr/imgclsmob'
def get_model_name_suffix_data(model_name):
    """
    Look up the checkpoint metadata registered for a model.
    Parameters:
    ----------
    model_name : str
        Name of the model.
    Returns:
    -------
    tuple of (str, str, str)
        Error string, SHA-1 hash of the weight file, and repo release tag.
    Raises:
    ------
    ValueError
        If no pretrained checkpoint is registered under `model_name`.
    """
    if model_name not in _model_sha1:
        raise ValueError("Pretrained model for {name} is not available.".format(name=model_name))
    return _model_sha1[model_name]
def get_model_file(model_name,
                   local_model_store_dir_path=os.path.join("~", ".tensorflow", "models")):
    """
    Return the local path of a pretrained model file, downloading it from the online model zoo when
    the file is missing or its SHA-1 hash does not match. The root directory is created if it
    doesn't exist.
    Parameters:
    ----------
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $TENSORFLOW_HOME/models
        Location for keeping the model parameters.
    Returns:
    -------
    file_path
        Path to the requested pretrained model file.
    """
    error, sha1_hash, repo_release_tag = get_model_name_suffix_data(model_name)
    short_sha1 = sha1_hash[:8]
    # File name encodes the model name, its error string and an 8-char hash prefix,
    # e.g. 'alexnet-1788-d3cd2a5a.tf.npz'.
    file_name = "{name}-{error}-{short_sha1}.tf.npz".format(
        name=model_name,
        error=error,
        short_sha1=short_sha1)
    local_model_store_dir_path = os.path.expanduser(local_model_store_dir_path)
    file_path = os.path.join(local_model_store_dir_path, file_name)
    if os.path.exists(file_path):
        if _check_sha1(file_path, sha1_hash):
            # Cached copy verified against the full SHA-1 - reuse it.
            return file_path
        else:
            logging.warning("Mismatch in the content of model file detected. Downloading again.")
    else:
        logging.info("Model file not found. Downloading to {}.".format(file_path))
    if not os.path.exists(local_model_store_dir_path):
        os.makedirs(local_model_store_dir_path)
    # Weights are published as zipped archives attached to GitHub releases:
    # download the archive next to the target file, extract, then remove the archive.
    zip_file_path = file_path + ".zip"
    _download(
        url="{repo_url}/releases/download/{repo_release_tag}/{file_name}.zip".format(
            repo_url=imgclsmob_repo_url,
            repo_release_tag=repo_release_tag,
            file_name=file_name),
        path=zip_file_path,
        overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(local_model_store_dir_path)
    os.remove(zip_file_path)
    # Verify the freshly extracted file before handing it to the caller.
    if _check_sha1(file_path, sha1_hash):
        return file_path
    else:
        raise ValueError("Downloaded file has different hash. Please try again.")
def _download(url, path=None, overwrite=False, sha1_hash=None, retries=5, verify_ssl=True):
    """Download a given URL.
    Parameters:
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    retries : integer, default 5
        The number of times to attempt the download in case of failure or non 200 return codes
    verify_ssl : bool, default True
        Verify SSL certificates.
    Returns:
    -------
    str
        The file path of the downloaded file.
    """
    import warnings
    try:
        import requests
    except ImportError:
        # Defer the ImportError: only fail if a download is actually attempted.
        class requests_failed_to_import(object):
            pass
        requests = requests_failed_to_import
    if path is None:
        fname = url.split("/")[-1]
        # Empty filenames are invalid
        assert fname, "Can't construct file-name from this URL. Please set the `path` option manually."
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            # A directory path keeps the file name taken from the URL.
            fname = os.path.join(path, url.split("/")[-1])
        else:
            fname = path
    assert retries >= 0, "Number of retries should be at least 0"
    if not verify_ssl:
        warnings.warn(
            "Unverified HTTPS request is being made (verify_ssl=False). "
            "Adding certificate verification is strongly advised.")
    # Skip the download entirely when a valid copy already exists.
    if overwrite or not os.path.exists(fname) or (sha1_hash and not _check_sha1(fname, sha1_hash)):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        # One initial attempt plus `retries` additional attempts.
        while retries + 1 > 0:
            # Disable pylint too broad Exception
            # pylint: disable=W0703
            try:
                print("Downloading {} from {}...".format(fname, url))
                r = requests.get(url, stream=True, verify=verify_ssl)
                if r.status_code != 200:
                    raise RuntimeError("Failed downloading url {}".format(url))
                with open(fname, "wb") as f:
                    for chunk in r.iter_content(chunk_size=1024):
                        if chunk:  # filter out keep-alive new chunks
                            f.write(chunk)
                if sha1_hash and not _check_sha1(fname, sha1_hash):
                    raise UserWarning("File {} is downloaded but the content hash does not match."
                                      " The repo may be outdated or download may be incomplete. "
                                      "If the `repo_url` is overridden, consider switching to "
                                      "the default repo.".format(fname))
                break
            except Exception as e:
                retries -= 1
                if retries <= 0:
                    raise e
                else:
                    print("download failed, retrying, {} attempt{} left"
                          .format(retries, "s" if retries > 1 else ""))
    return fname
def _check_sha1(filename, sha1_hash):
    """Check whether the sha1 hash of the file content matches the expected hash.
    Parameters:
    ----------
    filename : str
        Path to the file.
    sha1_hash : str
        Expected sha1 hash in hexadecimal digits.
    Returns:
    -------
    bool
        Whether the file content matches the expected hash.
    """
    digest = hashlib.sha1()
    with open(filename, "rb") as f:
        # Stream the file in 1 MiB chunks to keep memory usage flat.
        for chunk in iter(lambda: f.read(1048576), b""):
            digest.update(chunk)
    return digest.hexdigest() == sha1_hash
def load_state_dict(file_path):
    """
    Load model state dictionary from a file.
    Parameters:
    ----------
    file_path : str
        Path to the file. Supported formats: `.npy` (a pickled dict of arrays)
        and `.npz` (a NumPy archive of named arrays).
    Returns:
    -------
    state_dict : dict
        Dictionary with values of model variables.
    """
    import numpy as np
    assert os.path.exists(file_path) and os.path.isfile(file_path)
    if file_path.endswith(".npy"):
        # The .npy payload is a pickled dict; NumPy >= 1.16.3 refuses pickled
        # data unless allow_pickle is set explicitly, so pass it to keep the
        # legacy checkpoints loadable.
        state_dict = np.load(file_path, encoding="latin1", allow_pickle=True).item()
    elif file_path.endswith(".npz"):
        state_dict = dict(np.load(file_path))
    else:
        raise NotImplementedError("Unsupported file format: {}".format(file_path))
    return state_dict
def download_state_dict(model_name,
                        local_model_store_dir_path=os.path.join("~", ".tensorflow", "models")):
    """
    Load a model state dictionary, fetching the checkpoint file first if it is not cached locally.
    Parameters:
    ----------
    model_name : str
        Name of the model.
    local_model_store_dir_path : str, default $TENSORFLOW_HOME/models
        Location for keeping the model parameters.
    Returns:
    -------
    state_dict : dict
        Dictionary with values of model variables.
    file_path : str
        Path to the file.
    """
    # get_model_file() downloads and hash-verifies the checkpoint when needed.
    file_path = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=local_model_store_dir_path)
    return load_state_dict(file_path=file_path), file_path
def init_variables_from_state_dict(sess,
                                   state_dict,
                                   ignore_extra=True):
    """
    Initialize model variables from state dictionary.
    Parameters:
    ----------
    sess: Session
        A Session to use to load the weights.
    state_dict : dict
        Dictionary with values of model variables.
    ignore_extra : bool, default True
        Whether to silently ignore parameters from the file that are not present in this Module.
    """
    import tensorflow as tf
    assert sess is not None
    if state_dict is None:
        raise Exception("The state dict is empty")
    dst_params = {v.name: v for v in tf.global_variables()}
    # Initialize everything first, then overwrite the variables present in the file.
    sess.run(tf.global_variables_initializer())
    for src_key, src_value in state_dict.items():
        dst_var = dst_params.get(src_key)
        if dst_var is not None:
            # Shapes must agree exactly; a mismatch means the wrong checkpoint.
            assert (src_value.shape == tuple(dst_var.get_shape().as_list()))
            sess.run(dst_var.assign(src_value))
        elif not ignore_extra:
            raise Exception("The state dict is incompatible with the model")
        else:
            print("Key `{}` is ignored".format(src_key))
| 21,784 | 51.748184 | 115 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/zfnet.py | """
ZFNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
"""
__all__ = ['zfnet', 'zfnetb']
import os
import tensorflow as tf
from .common import is_channels_first
from .alexnet import AlexNet
def get_zfnet(version="a",
              model_name=None,
              pretrained=False,
              root=os.path.join("~", ".tensorflow", "models"),
              **kwargs):
    """
    Create ZFNet model with specific parameters.
    Parameters:
    ----------
    version : str, default 'a'
        Version of ZFNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    # Versions differ only in the widths of the last stage's convolutions.
    channels_per_version = {
        "a": [[96], [256], [384, 384, 256]],
        "b": [[96], [256], [512, 1024, 512]],
    }
    if version not in channels_per_version:
        raise ValueError("Unsupported ZFNet version {}".format(version))
    channels = channels_per_version[version]
    kernel_sizes = [[7], [5], [3, 3, 3]]
    strides = [[2], [2], [1, 1, 1]]
    paddings = [[1], [0], [1, 1, 1]]
    use_lrn = True
    # ZFNet reuses the AlexNet topology with different stage parameters.
    net = AlexNet(
        channels=channels,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        use_lrn=use_lrn,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def zfnet(**kwargs):
    """
    ZFNet model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
    Convenience wrapper around get_zfnet() for the default ('a') version.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_zfnet(model_name="zfnet", **kwargs)
def zfnetb(**kwargs):
    """
    ZFNet-b model from 'Visualizing and Understanding Convolutional Networks,' https://arxiv.org/abs/1311.2901.
    Convenience wrapper around get_zfnet() for the wider 'b' version.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_zfnet(version="b", model_name="zfnetb", **kwargs)
def _test():
    """TF1 graph-mode smoke test: build each ZFNet variant, check its trainable
    parameter count, and run one forward pass on a zero-valued batch."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        zfnet,
        zfnetb,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable parameters must match the published figure.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != zfnet or weight_count == 62357608)
        assert (model != zfnetb or weight_count == 107627624)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so each model builds from scratch.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 4,297 | 30.372263 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/darknet53.py | """
DarkNet-53 for ImageNet-1K, implemented in TensorFlow.
Original source: 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
"""
__all__ = ['DarkNet53', 'darknet53']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, is_channels_first, flatten
def dark_unit(x,
              in_channels,
              out_channels,
              alpha,
              training,
              data_format,
              name="dark_unit"):
    """
    DarkNet unit: a 1x1 bottleneck convolution followed by a 3x3 convolution,
    combined with the input through a residual connection.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dark_unit'
        Unit name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (out_channels % 2 == 0)
    mid_channels = out_channels // 2

    def leaky(suffix):
        # Build a Leaky ReLU activation scoped under this unit's name.
        return lambda y: tf.nn.leaky_relu(y, alpha=alpha, name=name + suffix)

    residual = x
    y = conv1x1_block(
        x=x,
        in_channels=in_channels,
        out_channels=mid_channels,
        activation=leaky("/conv1/activ"),
        training=training,
        data_format=data_format,
        name=name + "/conv1")
    y = conv3x3_block(
        x=y,
        in_channels=mid_channels,
        out_channels=out_channels,
        activation=leaky("/conv2/activ"),
        training=training,
        data_format=data_format,
        name=name + "/conv2")
    return y + residual
class DarkNet53(object):
    """
    DarkNet-53 model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet53, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration is stored only; the graph is built lazily in __call__.
        self.channels = channels
        self.init_block_channels = init_block_channels
        self.alpha = alpha
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # Stem: a single 3x3 convolution at full resolution.
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=self.init_block_channels,
            activation=(lambda y: tf.nn.leaky_relu(
                y,
                alpha=self.alpha,
                name="features/init_block/activ")),
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = self.init_block_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                if j == 0:
                    # First unit of each stage downsamples with a strided 3x3 conv.
                    # NOTE(review): "active" below is inconsistent with the "activ"
                    # suffix used elsewhere; it only names an op (no variables), so
                    # renaming should be safe, but confirm against checkpoint tooling.
                    x = conv3x3_block(
                        x=x,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=2,
                        activation=(lambda y: tf.nn.leaky_relu(
                            y,
                            alpha=self.alpha,
                            name="features/stage{}/unit{}/active".format(i + 1, j + 1))),
                        training=training,
                        data_format=self.data_format,
                        name="features/stage{}/unit{}".format(i + 1, j + 1))
                else:
                    # Remaining units are residual dark units at constant width.
                    x = dark_unit(
                        x=x,
                        in_channels=in_channels,
                        out_channels=out_channels,
                        alpha=self.alpha,
                        training=training,
                        data_format=self.data_format,
                        name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Head: 7x7 average pooling, flatten, and a linear classifier.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_darknet53(model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create DarkNet model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    # Five stages: per-stage unit counts and constant per-stage widths.
    stage_depths = [2, 3, 9, 9, 5]
    stage_widths = [64, 128, 256, 512, 1024]
    channels = [[width] * depth for (width, depth) in zip(stage_widths, stage_depths)]
    net = DarkNet53(
        channels=channels,
        init_block_channels=init_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None
    return net
def darknet53(**kwargs):
    """
    DarkNet-53 'Reference' model from 'YOLOv3: An Incremental Improvement,' https://arxiv.org/abs/1804.02767.
    Convenience wrapper around get_darknet53().
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.
    """
    return get_darknet53(model_name="darknet53", **kwargs)
def _test():
    """TF1 graph-mode smoke test: build DarkNet-53, check its trainable
    parameter count, and run one forward pass on a zero-valued batch."""
    import numpy as np
    data_format = "channels_last"
    pretrained = False
    models = [
        darknet53,
    ]
    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)
        # Total number of trainable parameters must match the published figure.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != darknet53 or weight_count == 41609928)
        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))
        # Clear the default graph so consecutive models do not share variables.
        tf.reset_default_graph()
if __name__ == "__main__":
    _test()
| 8,796 | 31.223443 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/mobilenet.py | """
MobileNet & FD-MobileNet for ImageNet-1K, implemented in TensorFlow.
Original papers:
- 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
- 'FD-MobileNet: Improved MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
"""
__all__ = ['MobileNet', 'mobilenet_w1', 'mobilenet_w3d4', 'mobilenet_wd2', 'mobilenet_wd4', 'fdmobilenet_w1',
'fdmobilenet_w3d4', 'fdmobilenet_wd2', 'fdmobilenet_wd4']
import os
import tensorflow as tf
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, is_channels_first, flatten
def dws_conv_block(x,
                   in_channels,
                   out_channels,
                   strides,
                   training,
                   data_format,
                   name="dws_conv_block"):
    """
    Depthwise separable convolution block with BatchNorms and activations at each convolution layers. It is used as
    a MobileNet unit: a 3x3 depthwise convolution followed by a 1x1 pointwise convolution.
    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dws_conv_block'
        Block name.
    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Depthwise 3x3: spatial filtering per channel (carries the stride).
    dw = dwconv3x3_block(
        x=x,
        in_channels=in_channels,
        out_channels=in_channels,
        strides=strides,
        training=training,
        data_format=data_format,
        name=name + "/dw_conv")
    # Pointwise 1x1: channel mixing to the target width.
    return conv1x1_block(
        x=dw,
        in_channels=in_channels,
        out_channels=out_channels,
        training=training,
        data_format=data_format,
        name=name + "/pw_conv")
class MobileNet(object):
    """
    MobileNet model from 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
    https://arxiv.org/abs/1704.04861. Also this class implements FD-MobileNet from 'FD-MobileNet: Improved MobileNet
    with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    first_stage_stride : bool
        Whether stride is used at the first stage.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 first_stage_stride,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(MobileNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        # Configuration is stored only; the graph is built lazily in __call__.
        self.channels = channels
        self.first_stage_stride = first_stage_stride
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format
    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.
        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.
        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        # channels[0][0] holds the stem width; the remaining sub-lists are stages.
        init_block_channels = self.channels[0][0]
        x = conv3x3_block(
            x=x,
            in_channels=in_channels,
            out_channels=init_block_channels,
            strides=2,
            training=training,
            data_format=self.data_format,
            name="features/init_block")
        in_channels = init_block_channels
        for i, channels_per_stage in enumerate(self.channels[1:]):
            for j, out_channels in enumerate(channels_per_stage):
                # Downsample at the first unit of each stage; the very first
                # stage downsamples only when first_stage_stride is set
                # (the FD-MobileNet fast-downsampling variant).
                strides = 2 if (j == 0) and ((i != 0) or self.first_stage_stride) else 1
                x = dws_conv_block(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
        # Head: 7x7 average pooling, flatten, and a linear classifier.
        x = tf.keras.layers.AveragePooling2D(
            pool_size=7,
            strides=1,
            data_format=self.data_format,
            name="features/final_pool")(x)
        # x = tf.layers.flatten(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = tf.keras.layers.Dense(
            units=self.classes,
            name="output")(x)
        return x
def get_mobilenet(version,
                  width_scale,
                  model_name=None,
                  pretrained=False,
                  root=os.path.join("~", ".tensorflow", "models"),
                  **kwargs):
    """
    Create MobileNet or FD-MobileNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of MobileNet ('orig' or 'fd').
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-version architecture configuration: (channels per stage, first_stage_stride).
    version_map = {
        'orig': ([[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 512], [1024, 1024]], False),
        'fd': ([[32], [64], [128, 128], [256, 256], [512, 512, 512, 512, 512, 1024]], True),
    }
    if version not in version_map:
        raise ValueError("Unsupported MobileNet version {}".format(version))
    channels, first_stage_stride = version_map[version]

    # Apply the width multiplier uniformly to every unit.
    if width_scale != 1.0:
        channels = [[int(c * width_scale) for c in stage] for stage in channels]

    net = MobileNet(
        channels=channels,
        first_stage_stride=first_stage_stride,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def mobilenet_w1(**kwargs):
    """
    MobileNet-224 with width multiplier 1.0 ('1.0 MobileNet-224') from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_w1", version="orig", width_scale=1.0, **kwargs)
def mobilenet_w3d4(**kwargs):
    """
    MobileNet-224 with width multiplier 0.75 ('0.75 MobileNet-224') from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_w3d4", version="orig", width_scale=0.75, **kwargs)
def mobilenet_wd2(**kwargs):
    """
    MobileNet-224 with width multiplier 0.5 ('0.5 MobileNet-224') from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_wd2", version="orig", width_scale=0.5, **kwargs)
def mobilenet_wd4(**kwargs):
    """
    MobileNet-224 with width multiplier 0.25 ('0.25 MobileNet-224') from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="mobilenet_wd4", version="orig", width_scale=0.25, **kwargs)
def fdmobilenet_w1(**kwargs):
    """
    FD-MobileNet with width multiplier 1.0 ('FD-MobileNet 1.0x') from 'FD-MobileNet: Improved
    MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_w1", version="fd", width_scale=1.0, **kwargs)
def fdmobilenet_w3d4(**kwargs):
    """
    FD-MobileNet with width multiplier 0.75 ('FD-MobileNet 0.75x') from 'FD-MobileNet: Improved
    MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_w3d4", version="fd", width_scale=0.75, **kwargs)
def fdmobilenet_wd2(**kwargs):
    """
    FD-MobileNet with width multiplier 0.5 ('FD-MobileNet 0.5x') from 'FD-MobileNet: Improved
    MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_wd2", version="fd", width_scale=0.5, **kwargs)
def fdmobilenet_wd4(**kwargs):
    """
    FD-MobileNet with width multiplier 0.25 ('FD-MobileNet 0.25x') from 'FD-MobileNet: Improved
    MobileNet with A Fast Downsampling Strategy,' https://arxiv.org/abs/1802.03750.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_mobilenet(model_name="fdmobilenet_wd4", version="fd", width_scale=0.25, **kwargs)
def _test():
    """
    Smoke-test all MobileNet/FD-MobileNet variants: build each graph, check the
    trainable-parameter count against the reference values, and run one zero-filled
    image through a TF1 session to verify the (1, 1000) output shape.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    models = [
        mobilenet_w1,
        mobilenet_w3d4,
        mobilenet_wd2,
        mobilenet_wd4,
        fdmobilenet_w1,
        fdmobilenet_w3d4,
        fdmobilenet_wd2,
        fdmobilenet_wd4,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        # Compare against the known parameter count for each variant.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != mobilenet_w1 or weight_count == 4231976)
        assert (model != mobilenet_w3d4 or weight_count == 2585560)
        assert (model != mobilenet_wd2 or weight_count == 1331592)
        assert (model != mobilenet_wd4 or weight_count == 470072)
        assert (model != fdmobilenet_w1 or weight_count == 2901288)
        assert (model != fdmobilenet_w3d4 or weight_count == 1833304)
        assert (model != fdmobilenet_wd2 or weight_count == 993928)
        assert (model != fdmobilenet_wd4 or weight_count == 383160)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))

        # Start each model from a clean default graph.
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 14,167 | 31.645161 | 119 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/darknet.py | """
DarkNet for ImageNet-1K, implemented in TensorFlow.
Original source: 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.
"""
__all__ = ['DarkNet', 'darknet_ref', 'darknet_tiny', 'darknet19']
import os
import tensorflow as tf
from .common import conv2d, maxpool2d, conv1x1_block, conv3x3_block, is_channels_first, flatten
def dark_convYxY(x,
                 in_channels,
                 out_channels,
                 alpha,
                 pointwise,
                 training,
                 data_format,
                 name="dark_convYxY"):
    """
    DarkNet unit: either a 1x1 (pointwise) or a 3x3 convolution block, activated with
    Leaky ReLU.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    alpha : float
        Slope coefficient for Leaky ReLU activation.
    pointwise : bool
        Whether use 1x1 (pointwise) convolution or 3x3 convolution.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'dark_convYxY'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Both branches differ only in the convolution kernel; pick the block builder once.
    conv_fn = conv1x1_block if pointwise else conv3x3_block
    return conv_fn(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        activation=(lambda y: tf.nn.leaky_relu(y, alpha=alpha, name=name + "/activ")),
        training=training,
        data_format=data_format,
        name=name)
class DarkNet(object):
    """
    DarkNet model from 'Darknet: Open source neural networks in c,' https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    odd_pointwise : bool
        Whether pointwise convolution layer is used for each odd unit.
    avg_pool_size : int
        Window size of the final average pooling.
    cls_activ : bool
        Whether classification convolution layer uses an activation.
    alpha : float, default 0.1
        Slope coefficient for Leaky ReLU activation.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 odd_pointwise,
                 avg_pool_size,
                 cls_activ,
                 alpha=0.1,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(DarkNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.odd_pointwise = odd_pointwise
        self.avg_pool_size = avg_pool_size
        self.cls_activ = cls_activ
        self.alpha = alpha
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        for i, channels_per_stage in enumerate(self.channels):
            for j, out_channels in enumerate(channels_per_stage):
                # In multi-unit stages the units alternate between 3x3 and 1x1
                # (pointwise) convolutions: odd 1-based positions are pointwise
                # exactly when `odd_pointwise` is set, even positions otherwise.
                # Single-unit stages always use a 3x3 convolution.
                x = dark_convYxY(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    alpha=self.alpha,
                    pointwise=(len(channels_per_stage) > 1) and not (((j + 1) % 2 == 1) ^ self.odd_pointwise),
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
            if i != len(self.channels) - 1:
                # 2x2/stride-2 max pooling between stages (omitted after the last stage).
                x = maxpool2d(
                    x=x,
                    pool_size=2,
                    strides=2,
                    data_format=self.data_format,
                    name="features/pool{}".format(i + 1))
        # Fully-convolutional classifier: 1x1 convolution to `classes` channels,
        # optional Leaky ReLU, then spatial average pooling instead of a dense layer.
        x = conv2d(
            x=x,
            in_channels=in_channels,
            out_channels=self.classes,
            kernel_size=1,
            data_format=self.data_format,
            name="output/final_conv")
        if self.cls_activ:
            x = tf.nn.leaky_relu(x, alpha=self.alpha, name="output/final_activ")
        x = tf.keras.layers.AveragePooling2D(
            pool_size=self.avg_pool_size,
            strides=1,
            data_format=self.data_format,
            name="output/final_pool")(x)
        x = flatten(
            x=x,
            data_format=self.data_format)
        return x
def get_darknet(version,
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create DarkNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of DarkNet ('ref', 'tiny' or '19').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-version architecture configuration:
    # (channels per stage, odd_pointwise, avg_pool_size, cls_activ).
    configs = {
        'ref': ([[16], [32], [64], [128], [256], [512], [1024]], False, 3, True),
        'tiny': ([[16], [32], [16, 128, 16, 128], [32, 256, 32, 256], [64, 512, 64, 512, 128]], True, 14, False),
        '19': ([[32], [64], [128, 64, 128], [256, 128, 256], [512, 256, 512, 256, 512],
                [1024, 512, 1024, 512, 1024]], False, 7, False),
    }
    if version not in configs:
        raise ValueError("Unsupported DarkNet version {}".format(version))
    channels, odd_pointwise, avg_pool_size, cls_activ = configs[version]

    net = DarkNet(
        channels=channels,
        odd_pointwise=odd_pointwise,
        avg_pool_size=avg_pool_size,
        cls_activ=cls_activ,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def darknet_ref(**kwargs):
    """
    DarkNet 'Reference' variant from 'Darknet: Open source neural networks in c,'
    https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_darknet(model_name="darknet_ref", version="ref", **kwargs)
def darknet_tiny(**kwargs):
    """
    DarkNet 'Tiny' variant from 'Darknet: Open source neural networks in c,'
    https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_darknet(model_name="darknet_tiny", version="tiny", **kwargs)
def darknet19(**kwargs):
    """
    DarkNet-19 variant from 'Darknet: Open source neural networks in c,'
    https://github.com/pjreddie/darknet.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_darknet(model_name="darknet19", version="19", **kwargs)
def _test():
    """
    Smoke-test all DarkNet variants: build each graph, check the trainable-parameter
    count against the reference values, and run one zero-filled image through a TF1
    session to verify the (1, 1000) output shape.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    models = [
        darknet_ref,
        darknet_tiny,
        darknet19,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        # Compare against the known parameter count for each variant.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != darknet_ref or weight_count == 7319416)
        assert (model != darknet_tiny or weight_count == 1042104)
        assert (model != darknet19 or weight_count == 20842376)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))

        # Start each model from a clean default graph.
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 10,892 | 31.038235 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/alexnet.py | """
AlexNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'One weird trick for parallelizing convolutional neural networks,'
https://arxiv.org/abs/1404.5997.
"""
__all__ = ['AlexNet', 'alexnet', 'alexnetb']
import os
import tensorflow as tf
from .common import maxpool2d, conv_block, is_channels_first, flatten
def alex_conv(x,
              in_channels,
              out_channels,
              kernel_size,
              strides,
              padding,
              use_lrn,
              training,
              data_format,
              name="alex_conv"):
    """
    AlexNet specific convolution block: a plain convolution (bias, no BatchNorm),
    optionally followed by local response normalization.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    use_lrn : bool
        Whether to use LRN layer.
    training : bool
        Whether to return the output in training mode or in inference mode.
    data_format : str
        The ordering of the dimensions in tensors.
    name : str, default 'alex_conv'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    out = conv_block(
        x=x,
        in_channels=in_channels,
        out_channels=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        use_bias=True,
        use_bn=False,
        training=training,
        data_format=data_format,
        name=name + "/conv")
    if not use_lrn:
        return out
    # Local response normalization with fixed hyper-parameters.
    return tf.nn.lrn(out, bias=2, alpha=1e-4, beta=0.75)
def alex_dense(x,
               in_channels,
               out_channels,
               training,
               name="alex_dense"):
    """
    AlexNet specific dense block: fully-connected layer, ReLU, then 50% dropout.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'alex_dense'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    assert (in_channels > 0)
    out = tf.keras.layers.Dense(
        units=out_channels,
        name=name + "/fc")(x)
    out = tf.nn.relu(out, name=name + "/activ")
    # Dropout is active only when `training` is true.
    return tf.keras.layers.Dropout(
        rate=0.5,
        name=name + "/dropout")(
        inputs=out,
        training=training)
def alex_output_block(x,
                      in_channels,
                      classes,
                      training,
                      name="alex_output_block"):
    """
    AlexNet specific output block: two 4096-wide dense+dropout blocks followed by
    the final classification layer.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    classes : int
        Number of classification classes.
    training : bool, or a TensorFlow boolean scalar tensor
        Whether to return the output in training mode or in inference mode.
    name : str, default 'alex_output_block'
        Block name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    mid_channels = 4096
    # Two identical hidden dense blocks ("fc1", "fc2").
    for fc_name in ("fc1", "fc2"):
        x = alex_dense(
            x=x,
            in_channels=in_channels,
            out_channels=mid_channels,
            training=training,
            name=name + "/" + fc_name)
        in_channels = mid_channels
    return tf.keras.layers.Dense(
        units=classes,
        name=name + "/fc3")(x)
class AlexNet(object):
    """
    AlexNet model from 'One weird trick for parallelizing convolutional neural networks,'
    https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    kernel_sizes : list of list of int
        Convolution window sizes for each unit.
    strides : list of list of int or tuple/list of 2 int
        Strides of the convolution for each unit.
    paddings : list of list of int or tuple/list of 2 int
        Padding value for convolution layer for each unit.
    use_lrn : bool
        Whether to use LRN layer.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 channels,
                 kernel_sizes,
                 strides,
                 paddings,
                 use_lrn,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 data_format="channels_last",
                 **kwargs):
        super(AlexNet, self).__init__(**kwargs)
        assert (data_format in ["channels_last", "channels_first"])
        self.channels = channels
        self.kernel_sizes = kernel_sizes
        self.strides = strides
        self.paddings = paddings
        self.use_lrn = use_lrn
        self.in_channels = in_channels
        self.in_size = in_size
        self.classes = classes
        self.data_format = data_format

    def __call__(self,
                 x,
                 training=False):
        """
        Build a model graph.

        Parameters:
        ----------
        x : Tensor
            Input tensor.
        training : bool, or a TensorFlow boolean scalar tensor, default False
            Whether to return the output in training mode or in inference mode.

        Returns:
        -------
        Tensor
            Resulted tensor.
        """
        in_channels = self.in_channels
        for i, channels_per_stage in enumerate(self.channels):
            # LRN (when enabled at all) is applied only within the first two stages.
            use_lrn_i = self.use_lrn and (i in [0, 1])
            for j, out_channels in enumerate(channels_per_stage):
                x = alex_conv(
                    x=x,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=self.kernel_sizes[i][j],
                    strides=self.strides[i][j],
                    padding=self.paddings[i][j],
                    use_lrn=use_lrn_i,
                    training=training,
                    data_format=self.data_format,
                    name="features/stage{}/unit{}".format(i + 1, j + 1))
                in_channels = out_channels
            # Overlapping 3x3/stride-2 max pooling closes every stage.
            x = maxpool2d(
                x=x,
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True,
                data_format=self.data_format,
                name="features/stage{}/pool".format(i + 1))
        # Flattened feature count for the classifier; a 6x6 spatial map is assumed
        # here (hard-coded, not derived from `in_size` — holds for 224x224 inputs).
        in_channels = in_channels * 6 * 6
        x = flatten(
            x=x,
            data_format=self.data_format)
        x = alex_output_block(
            x=x,
            in_channels=in_channels,
            classes=self.classes,
            training=training,
            name="output")
        return x
def get_alexnet(version="a",
                model_name=None,
                pretrained=False,
                root=os.path.join("~", ".tensorflow", "models"),
                **kwargs):
    """
    Create AlexNet model with specific parameters.

    Parameters:
    ----------
    version : str, default 'a'
        Version of AlexNet ('a' or 'b').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    # Per-version configuration: (channels, kernel_sizes, strides, paddings, use_lrn).
    configs = {
        "a": ([[96], [256], [384, 384, 256]],
              [[11], [5], [3, 3, 3]],
              [[4], [1], [1, 1, 1]],
              [[0], [2], [1, 1, 1]],
              True),
        "b": ([[64], [192], [384, 256, 256]],
              [[11], [5], [3, 3, 3]],
              [[4], [1], [1, 1, 1]],
              [[2], [2], [1, 1, 1]],
              False),
    }
    if version not in configs:
        raise ValueError("Unsupported AlexNet version {}".format(version))
    channels, kernel_sizes, strides, paddings, use_lrn = configs[version]

    net = AlexNet(
        channels=channels,
        kernel_sizes=kernel_sizes,
        strides=strides,
        paddings=paddings,
        use_lrn=use_lrn,
        **kwargs)

    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import download_state_dict
        net.state_dict, net.file_path = download_state_dict(
            model_name=model_name,
            local_model_store_dir_path=root)
    else:
        net.state_dict = None
        net.file_path = None

    return net
def alexnet(**kwargs):
    """
    AlexNet model (variant 'a') from 'One weird trick for parallelizing convolutional
    neural networks,' https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_alexnet(model_name="alexnet", **kwargs)
def alexnetb(**kwargs):
    """
    AlexNet-b model (non-standard variant 'b') from 'One weird trick for parallelizing
    convolutional neural networks,' https://arxiv.org/abs/1404.5997.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.tensorflow/models'
        Location for keeping the model parameters.

    Returns:
    -------
    functor
        Functor for model graph creation with extra fields.
    """
    return get_alexnet(model_name="alexnetb", version="b", **kwargs)
def _test():
    """
    Smoke-test both AlexNet variants: build each graph, check the trainable-parameter
    count against the reference values, and run one zero-filled image through a TF1
    session to verify the (1, 1000) output shape.
    """
    import numpy as np

    data_format = "channels_last"
    pretrained = False

    models = [
        alexnet,
        alexnetb,
    ]

    for model in models:
        net = model(pretrained=pretrained, data_format=data_format)
        x = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),
            name="xx")
        y_net = net(x)

        # Compare against the known parameter count for each variant.
        weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()])
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != alexnet or weight_count == 62378344)
        assert (model != alexnetb or weight_count == 61100840)

        with tf.Session() as sess:
            if pretrained:
                from .model_store import init_variables_from_state_dict
                init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)
            else:
                sess.run(tf.global_variables_initializer())
            x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)
            y = sess.run(y_net, feed_dict={x: x_value})
            assert (y.shape == (1, 1000))

        # Start each model from a clean default graph.
        tf.reset_default_graph()


if __name__ == "__main__":
    _test()
| 11,485 | 28.603093 | 116 | py |
imgclsmob | imgclsmob-master/tensorflow_/tensorflowcv/models/others/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/tests/convert_gl2tf2_conv2d_b.py | import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
def is_channels_first(data_format):
    """
    Check whether the given data format string denotes the channels-first layout.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    bool
        A flag.
    """
    return "channels_first" == data_format
class TF2Model(tf.keras.Model):
    """
    Minimal TF2 model holding a single 7x7, stride-1, 'same'-padded, bias-free
    convolution with 64 output filters, used to compare against the Gluon
    reference implementation in this script.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        self.conv = nn.Conv2D(
            filters=64,
            kernel_size=(7, 7),
            strides=1,
            padding="same",
            data_format=data_format,
            dilation_rate=1,
            use_bias=False,
            name="conv")

    def call(self, x):
        # Forward pass: just the single convolution.
        x = self.conv(x)
        return x
def gl_calc(gl_w, x):
    """
    Run the reference 7x7 convolution in MXNet/Gluon.

    Loads the weight array `gl_w` (Gluon's (out_channels, in_channels, kh, kw)
    layout) into a single bias-free Conv2D and applies it to the NCHW input
    batch `x`, returning the result as a NumPy array.

    NOTE(review): runs on mx.gpu(0) — requires a CUDA-enabled MXNet build.
    """
    import mxnet as mx

    class GluonModel(mx.gluon.HybridBlock):
        # Mirror of TF2Model: one 7x7/stride-1 conv, padding 3 (i.e. 'same'), no bias.
        def __init__(self,
                     **kwargs):
            super(GluonModel, self).__init__(**kwargs)
            with self.name_scope():
                self.conv = mx.gluon.nn.Conv2D(
                    channels=64,
                    kernel_size=(7, 7),
                    strides=1,
                    padding=(3, 3),
                    use_bias=False,
                    in_channels=3)

        def hybrid_forward(self, F, x):
            x = self.conv(x)
            return x

    gl_model = GluonModel()
    # ctx = mx.cpu()
    ctx = mx.gpu(0)
    gl_params = gl_model._collect_params_with_prefix()
    # Load the externally supplied weights instead of random initialization.
    gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
    gl_x = mx.nd.array(x, ctx)
    gl_y = gl_model(gl_x).asnumpy()
    return gl_y
def main():
    """
    Compare a Gluon 7x7 convolution against its TF2 equivalent on random weights
    and inputs over several trials, transposing between NCHW (Gluon) and NHWC
    (TF channels_last) layouts; report any trial whose outputs differ by more
    than a small tolerance.
    """
    # Let TF grow GPU memory on demand instead of reserving it all up front.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)

    success = True
    for i in range(10):
        # Gluon-layout weights: (out_channels, in_channels, kh, kw).
        gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
        # tf2_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
        b = np.random.randn(64, ).astype(np.float32)
        # NCHW input batch (non-square spatial size to catch axis mix-ups).
        x = np.random.randn(10, 3, 224, 256).astype(np.float32)
        assert (b is not None)

        data_format = "channels_last"
        # data_format = "channels_first"
        tf2_use_cuda = True
        if not tf2_use_cuda:
            with tf.device("/cpu:0"):
                tf2_model = TF2Model(data_format=data_format)
        else:
            tf2_model = TF2Model(data_format=data_format)
        input_shape = (1, 224, 256, 3) if data_format == "channels_last" else (1, 3, 224, 256)
        tf2_model.build(input_shape=input_shape)
        tf2_params = {v.name: v for v in tf2_model.weights}
        # print(tf2_params["conv/kernel:0"].shape)
        # Convert Gluon (O, I, H, W) weights to TF (H, W, I, O) layout.
        tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
        tf2_params["conv/kernel:0"].assign(tf2_w)
        # tf2_params["conv/bias:0"].assign(b)
        tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
        tf2_x = tf.convert_to_tensor(tf2_x)
        tf2_y = tf2_model(tf2_x).numpy()
        if data_format == "channels_last":
            # Bring the TF output back to NCHW for the comparison below.
            tf2_y = tf2_y.transpose((0, 3, 1, 2))

        gl_y = gl_calc(gl_w, x)

        dist = np.sum(np.abs(gl_y - tf2_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")


if __name__ == "__main__":
    main()
| 3,744 | 26.947761 | 94 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_batchnorm.py | import numpy as np
import mxnet as mx
import tensorflow as tf
LENGTH = 64
class GluonModel(mx.gluon.HybridBlock):
    """
    Reference Gluon model: a single BatchNorm layer over LENGTH channels (batch
    statistics mode, matching the TF layer's defaults), used to validate the
    TensorFlow batch-norm conversion in this script.
    """
    def __init__(self,
                 **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = mx.gluon.nn.BatchNorm(
                momentum=0.9,
                epsilon=1e-5,
                in_channels=LENGTH,
                use_global_stats=False)

    def hybrid_forward(self, F, x):
        # Forward pass: just the BatchNorm layer.
        x = self.bn(x)
        return x
def batchnorm(x,
              momentum=0.9,
              epsilon=1e-5,
              training=False,
              name=None):
    """
    Batch normalization layer over the channel axis (axis=1, i.e. NCHW layout).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    training : bool, or a TensorFlow boolean scalar tensor, default False
        Whether to return the output in training mode or in inference mode.
    name : str, default None
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    return tf.layers.batch_normalization(
        inputs=x,
        axis=1,
        momentum=momentum,
        epsilon=epsilon,
        training=training,
        name=name)
def tensorflow_model(x):
    """
    Build the reference TensorFlow graph: a single BatchNorm layer named 'bn',
    evaluated in inference mode (uses moving statistics).
    """
    return batchnorm(
        x=x,
        training=False,
        name="bn")
def main():
    """
    Compare Gluon and TensorFlow batch normalization on random parameters and
    inputs over several trials; report any trial whose outputs differ by more
    than a small tolerance.
    """
    success = True
    for i in range(10):
        # Random affine parameters (gamma/beta) and running statistics (mean/var).
        g = np.random.randn(LENGTH, ).astype(np.float32)
        b = np.random.randn(LENGTH, ).astype(np.float32)
        m = np.random.randn(LENGTH, ).astype(np.float32)
        v = np.random.randn(LENGTH, ).astype(np.float32)
        # Shift to strictly positive values; the running variance in particular must be > 0.
        b = b - b.min() + 1.0
        v = v - v.min() + 1.0
        IMG_SIZE = 224
        x = np.random.randn(10, LENGTH, IMG_SIZE, IMG_SIZE).astype(np.float32)

        # Gluon side: load the parameters directly into the BatchNorm layer.
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        gl_params['bn.gamma']._load_init(mx.nd.array(g, ctx), ctx)
        gl_params['bn.beta']._load_init(mx.nd.array(b, ctx), ctx)
        gl_params['bn.running_mean']._load_init(mx.nd.array(m, ctx), ctx)
        gl_params['bn.running_var']._load_init(mx.nd.array(v, ctx), ctx)
        # gl_model.initialize()
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()

        # TensorFlow side: build the graph and assign the same parameters.
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, LENGTH, IMG_SIZE, IMG_SIZE),
            name='xx')
        tf_model = tensorflow_model(xx)
        # NOTE: in Python 3 the comprehension variable `v` does not leak, so the
        # running-variance array `v` above is still intact for the assignment below.
        tf_params = {v.name: v for v in tf.global_variables()}
        with tf.Session() as sess:
            sess.run(tf_params['bn/gamma:0'].assign(g))
            sess.run(tf_params['bn/beta:0'].assign(b))
            sess.run(tf_params['bn/moving_mean:0'].assign(m))
            sess.run(tf_params['bn/moving_variance:0'].assign(v))
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        tf.reset_default_graph()

        diff = np.abs(gl_y - tf_y)
        dist = np.sum(diff)
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")


if __name__ == '__main__':
    main()
| 3,380 | 25.414063 | 78 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_dense.py | import numpy as np
import mxnet as mx
import tensorflow as tf
# import tensorflow.contrib.slim as slim
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: one bias-free fully-connected layer (1024 -> 1000)."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.dense = mx.gluon.nn.Dense(
                units=1000,
                use_bias=False,
                flatten=True,
                in_units=1024)

    def hybrid_forward(self, F, x):
        return self.dense(x)
def tensorflow_model(x):
    """TF1 graph under test: one bias-free dense layer (1024 -> 1000)."""
    return tf.layers.dense(
        inputs=x,
        units=1000,
        use_bias=False,
        name="dense")
def main():
    """Compare Gluon Dense against tf.layers.dense with a shared weight matrix.

    The TF kernel is (in_units, out_units); Gluon stores (out_units, in_units),
    hence the transpose before loading.  Each side is also checked against a
    float64 numpy matmul as a ground-truth reference.
    """
    success = True
    for i in range(10):
        # gl_w = np.random.randn(1000, 1024).astype(np.float32)
        tf_w = np.random.randn(1024, 1000).astype(np.float32)
        # b = np.random.randn(1000, ).astype(np.float32)
        x = np.random.randn(1, 1024).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        # Gluon expects the transposed (out, in) layout.
        gl_w = np.transpose(tf_w, axes=(1, 0))
        gl_params['dense.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
        # gl_params['dense.bias']._load_init(mx.nd.array(b, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 1024),
            name='xx')
        tf_model = tensorflow_model(xx)
        tf_params = {v.name: v for v in tf.global_variables()}
        with tf.Session() as sess:
            # tf_w = np.transpose(gl_w, axes=(1, 0))
            sess.run(tf_params['dense/kernel:0'].assign(tf_w))
            # sess.run(tf_params['dense/bias:0'].assign(b))
            # sess.run(tf_params['dense/weights:0'].assign(tf_w))
            # sess.run(tf_params['dense/biases:0'].assign(b))
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
        # Cross-check both frameworks against a float64 reference product.
        y = np.matmul(gl_w.astype(np.float64), x[0].astype(np.float64))
        # y = np.dot(w, x[0])
        gl_dist = np.sum(np.abs(gl_y - y))
        tf_dist = np.sum(np.abs(tf_y - y))
        print("i={}, gl_dist={}".format(i, gl_dist))
        print("i={}, tf_dist={}".format(i, tf_dist))
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 2,765 | 27.8125 | 75 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_maxpool2d.py | import math
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: 2x2 max pooling with stride 2 and no padding."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = mx.gluon.nn.MaxPool2D(
                pool_size=2,
                strides=2,
                padding=0)

    def hybrid_forward(self, F, x):
        return self.pool(x)
def maxpool2d(x,
              pool_size,
              strides,
              padding=0,
              ceil_mode=False,
              name=None):
    """
    Max pooling operation for two dimensional (spatial) data, NCHW layout.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    pool_size : int or tuple/list of 2 int
        Size of the max pooling windows.
    strides : int or tuple/list of 2 int
        Strides of the pooling.
    padding : int or tuple/list of 2 int, default 0
        Padding value for convolution layer.
    ceil_mode : bool, default False
        When `True`, will use ceil instead of floor to compute the output shape.
    name : str, default None
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    # Normalize every scalar argument to an (h, w) pair.  The previous version
    # only normalized `padding`, so an int `pool_size`/`strides` combined with
    # ceil_mode=True raised a TypeError on `pool_size[0]`.
    if isinstance(pool_size, int):
        pool_size = (pool_size, pool_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    # Per-dimension (before, after) padding.  MXNet's ceil_mode is emulated by
    # padding the *end* of a dimension when the output size is fractional.
    # The previous version did `padding[0] += 1` on a tuple (TypeError) and
    # reused [pad_h, pad_w] for both spatial dims of the pad spec, which mixed
    # the height pad into the width dimension whenever pad_h != pad_w.
    pad_h = [padding[0], padding[0]]
    pad_w = [padding[1], padding[1]]
    if ceil_mode:
        height = x.shape[2]
        out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
        if math.ceil(out_height) > math.floor(out_height):
            pad_h[1] += 1
        width = x.shape[3]
        out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
        if math.ceil(out_width) > math.floor(out_width):
            pad_w[1] += 1

    if (pad_h[0] > 0) or (pad_h[1] > 0) or (pad_w[0] > 0) or (pad_w[1] > 0):
        # NOTE(review): REFLECT padding mirrors interior values instead of
        # inserting zeros/-inf; presumably chosen to approximate MXNet's
        # edge handling for padded max pooling - TODO confirm equivalence.
        x = tf.pad(x, [[0, 0], [0, 0], pad_h, pad_w], mode="REFLECT")

    x = tf.layers.max_pooling2d(
        inputs=x,
        pool_size=pool_size,
        strides=strides,
        padding='valid',
        data_format='channels_first',
        name=name)
    return x
def tensorflow_model(x):
    """TF graph under test: 2x2/stride-2 max pooling, floor mode, no padding."""
    return maxpool2d(
        x=x,
        pool_size=2,
        strides=2,
        padding=0,
        ceil_mode=False,
        name="pool")
def main():
    """Compare Gluon MaxPool2D against the TF1 maxpool2d wrapper on random batches.

    Pooling has no parameters, so only the input tensor is shared; any trial
    with summed absolute difference above 1e-5 is reported.
    """
    success = True
    for i in range(10):
        x = np.random.randn(10, 10, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 10, 224, 224),
            name='xx')
        tf_model = tensorflow_model(xx)
        with tf.Session() as sess:
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        # Fresh graph for the next trial.
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 3,444 | 23.607143 | 85 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf2_lstm.py | import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
def _calc_width(net):
import numpy as np
net_params = net.collect_params()
weight_count = 0
for param in net_params.values():
if (param.shape is None) or (not param._differentiable):
continue
weight_count += np.prod(param.shape)
return weight_count
class TF2Model(tf.keras.Model):
    """Model under test: two stacked 100-unit LSTM cells wrapped in a Keras RNN layer."""

    def __init__(self,
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        cells = [
            tf.compat.v1.nn.rnn_cell.LSTMCell(
                num_units=100,
                use_peepholes=False,
                name="rnn{}".format(i))
            for i in range(2)]
        self.rnn = nn.RNN(cells)

    def call(self, x):
        return self.rnn(x)
def gl_calc():
    """
    Build, initialize and warm up a reference Gluon LSTM (2 layers, hidden
    size 100, input size 80), then count its weights via _calc_width.

    NOTE(review): `main()` below calls this as `gl_calc(gl_w, x)` and treats
    the result as an array, but this function takes no arguments and returns
    the model object - the script looks like unfinished work-in-progress.
    """
    import mxnet as mx
    class GluonModel(mx.gluon.HybridBlock):
        def __init__(self,
                     **kwargs):
            super(GluonModel, self).__init__(**kwargs)
            with self.name_scope():
                self.rnn = mx.gluon.rnn.LSTM(
                    hidden_size=100,
                    num_layers=2,
                    dropout=0.2,
                    input_size=80)
        def hybrid_forward(self, F, x):
            x = self.rnn(x)
            # src_params = self._collect_params_with_prefix()
            # src_param_keys = list(src_params.keys())
            # src_params[src_param_keys[0]]._data[0].asnumpy()
            # dst_params[dst_key]._load_init(mx.nd.array(src_params[src_key].numpy(), ctx), ctx)
            return x
    gl_model = GluonModel()
    # # ctx = mx.cpu()
    # ctx = mx.gpu(0)
    # gl_params = gl_model._collect_params_with_prefix()
    # # gl_w = np.transpose(tf2_w, axes=(3, 2, 0, 1))
    # gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
    # # gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
    # #
    # # gl_x = mx.nd.array(x, ctx)
    # # gl_y = gl_model(gl_x).asnumpy()
    ctx = mx.gpu(0)
    # One dummy forward pass of shape (3, 7, 80) so parameter shapes
    # get inferred before the weight count.
    gl_x = mx.nd.zeros((3, 7, 80), ctx)
    gl_model.initialize(ctx=ctx)
    gl_model(gl_x)
    # gl_params = gl_model._collect_params_with_prefix()
    _calc_width(gl_model)
    return gl_model
def main():
    """Driver intended to compare a Gluon LSTM against the TF2 RNN stack.

    NOTE(review): this script appears to be unfinished work-in-progress -
    the loop body still holds conv-conversion code: `TF2Model` in this file
    accepts no `data_format` keyword, `gl_calc` is defined without
    parameters yet called as `gl_calc(gl_w, x)`, and it returns the model
    object rather than an array.
    """
    # Let TF grow GPU memory on demand instead of grabbing it all upfront.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    success = True
    for i in range(10):
        # tf2_model = TF2Model()
        # batch_size = 1
        # input_shape = (3, 7, 80)
        # tf2_model(tf.random.normal(input_shape))
        # dst_param_keys = [v.name for v in tf2_model.weights]
        # dst_params = {v.name: v for v in tf2_model.weights}
        #
        # gl_calc()
        gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
        # tf2_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
        b = np.random.randn(64, ).astype(np.float32)
        x = np.random.randn(10, 3, 224, 256).astype(np.float32)
        assert (b is not None)
        data_format = "channels_last"
        # data_format = "channels_first"
        tf2_use_cuda = True
        if not tf2_use_cuda:
            with tf.device("/cpu:0"):
                tf2_model = TF2Model(data_format=data_format)
        else:
            tf2_model = TF2Model(data_format=data_format)
        input_shape = (1, 224, 256, 3) if data_format == "channels_last" else (1, 3, 224, 256)
        tf2_model.build(input_shape=input_shape)
        tf2_params = {v.name: v for v in tf2_model.weights}
        # print(tf2_params["conv/kernel:0"].shape)
        # tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
        tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
        tf2_params["conv/kernel:0"].assign(tf2_w)
        # tf2_params["conv/bias:0"].assign(b)
        tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
        tf2_x = tf.convert_to_tensor(tf2_x)
        tf2_y = tf2_model(tf2_x).numpy()
        if data_format == "channels_last":
            tf2_y = tf2_y.transpose((0, 3, 1, 2))
        gl_y = gl_calc(gl_w, x)
        dist = np.sum(np.abs(gl_y - tf2_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == "__main__":
    main()
| 4,631 | 29.675497 | 96 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf2_batchnorm.py | import numpy as np
import mxnet as mx
import tensorflow as tf
import tensorflow.keras.layers as nn
LENGTH = 64
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: a single BatchNorm layer over LENGTH channels."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = mx.gluon.nn.BatchNorm(
                momentum=0.9,
                epsilon=1e-5,
                in_channels=LENGTH,
                use_global_stats=False)

    def hybrid_forward(self, F, x):
        return self.bn(x)
def is_channels_first(data_format):
    """
    Return True when *data_format* denotes the channels-first (NCHW) layout.

    Parameters:
    ----------
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    bool
        A flag.
    """
    channels_first = (data_format == "channels_first")
    return channels_first
def get_channel_axis(data_format):
    """
    Get the channel axis index for a data format.

    Parameters:
    ----------
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    int
        1 for 'channels_first' (NCHW), otherwise -1 (NHWC).
    """
    if data_format == "channels_first":
        return 1
    return -1
class BatchNorm(nn.BatchNormalization):
    """
    MXNet/Gluon-flavoured batch normalization (defaults match Gluon's layer).

    Parameters:
    ----------
    momentum : float, default 0.9
        Momentum for the moving average.
    epsilon : float, default 1e-5
        Small float added to variance to avoid dividing by zero.
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 momentum=0.9,
                 epsilon=1e-5,
                 data_format="channels_last",
                 **kwargs):
        # Pick the normalization axis from the layout before delegating.
        axis = get_channel_axis(data_format)
        super(BatchNorm, self).__init__(
            axis=axis,
            momentum=momentum,
            epsilon=epsilon,
            **kwargs)
class TF2Model(tf.keras.Model):
    """Model under test: a single MXNet-style BatchNorm layer."""

    def __init__(self,
                 bn_eps=1e-5,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        self.bn = BatchNorm(
            epsilon=bn_eps,
            data_format=data_format,
            name="bn")

    def call(self, x, training=None):
        return self.bn(x, training=training)
def main():
    """Compare Gluon BatchNorm against the TF2 BatchNorm wrapper on random data.

    Shared gamma/beta/mean/var statistics are loaded into both models; the
    mean absolute difference over each 10-image batch must stay below 1e-5.
    """
    # Let TF grow GPU memory on demand instead of grabbing it all upfront.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    success = True
    for i in range(10):
        g = np.random.randn(LENGTH, ).astype(np.float32)
        b = np.random.randn(LENGTH, ).astype(np.float32)
        m = np.random.randn(LENGTH, ).astype(np.float32)
        v = np.random.randn(LENGTH, ).astype(np.float32)
        # Shift b and v to be strictly positive (a variance must be > 0).
        b = b - b.min() + 1.0
        v = v - v.min() + 1.0
        IMG_SIZE = 224
        x = np.random.randn(10, LENGTH, IMG_SIZE, IMG_SIZE).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        gl_params['bn.gamma']._load_init(mx.nd.array(g, ctx), ctx)
        gl_params['bn.beta']._load_init(mx.nd.array(b, ctx), ctx)
        gl_params['bn.running_mean']._load_init(mx.nd.array(m, ctx), ctx)
        gl_params['bn.running_var']._load_init(mx.nd.array(v, ctx), ctx)
        # gl_model.initialize()
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        data_format = "channels_last"
        # data_format = "channels_first"
        tf2_use_cuda = True
        if not tf2_use_cuda:
            with tf.device("/cpu:0"):
                tf2_model = TF2Model(data_format=data_format)
        else:
            tf2_model = TF2Model(data_format=data_format)
        input_shape = (1, IMG_SIZE, IMG_SIZE, LENGTH) if data_format == "channels_last" else\
            (1, LENGTH, IMG_SIZE, IMG_SIZE)
        tf2_model.build(input_shape=input_shape)
        tf2_params = {v.name: v for v in tf2_model.weights}
        tf2_params["bn/gamma:0"].assign(g)
        tf2_params["bn/beta:0"].assign(b)
        tf2_params["bn/moving_mean:0"].assign(m)
        tf2_params["bn/moving_variance:0"].assign(v)
        # NHWC input for channels_last; transpose back for the comparison.
        tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
        tf2_x = tf.convert_to_tensor(tf2_x)
        tf2_y = tf2_model(tf2_x).numpy()
        if data_format == "channels_last":
            tf2_y = tf2_y.transpose((0, 3, 1, 2))
        diff = np.abs(gl_y - tf2_y)
        dist = diff.mean()
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 4,796 | 26.568966 | 93 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2pt_batchnorm.py | import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
LENGTH = 64
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: a single BatchNorm layer over LENGTH channels."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.bn = mx.gluon.nn.BatchNorm(
                momentum=0.9,
                epsilon=1e-5,
                in_channels=LENGTH,
                use_global_stats=False)

    def hybrid_forward(self, F, x):
        return self.bn(x)
class PytorchModel(torch.nn.Module):
    """PyTorch counterpart of GluonModel: a single BatchNorm2d over LENGTH channels."""

    def __init__(self):
        super(PytorchModel, self).__init__()
        # NOTE: PyTorch's `momentum` weights the *new* observation
        # (running = (1 - momentum) * running + momentum * batch_stat),
        # whereas MXNet's `momentum=0.9` weights the *old* running average.
        # The MXNet-equivalent value is therefore 1 - 0.9 = 0.1; the previous
        # value of 0.9 would make training-mode running statistics diverge
        # (eval-mode results, as used by main(), are unaffected because the
        # running stats are assigned directly).
        self.bn = torch.nn.BatchNorm2d(
            num_features=LENGTH,
            eps=1e-5,
            momentum=0.1)

    def forward(self, x):
        x = self.bn(x)
        return x
def main():
    """Compare Gluon BatchNorm against torch.nn.BatchNorm2d with shared statistics.

    Both models are run in inference mode with identical gamma/beta/mean/var;
    any trial with summed absolute difference above 1e-5 is reported.
    """
    success = True
    for i in range(10):
        g = np.random.randn(LENGTH, ).astype(np.float32)
        b = np.random.randn(LENGTH, ).astype(np.float32)
        m = np.random.randn(LENGTH, ).astype(np.float32)
        v = np.random.randn(LENGTH, ).astype(np.float32)
        # Shift b and v to be strictly positive (a variance must be > 0).
        b = b - b.min() + 1.0
        v = v - v.min() + 1.0
        IMG_SIZE = 224
        x = np.random.randn(1, LENGTH, IMG_SIZE, IMG_SIZE).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        gl_params['bn.gamma']._load_init(mx.nd.array(g, ctx), ctx)
        gl_params['bn.beta']._load_init(mx.nd.array(b, ctx), ctx)
        gl_params['bn.running_mean']._load_init(mx.nd.array(m, ctx), ctx)
        gl_params['bn.running_var']._load_init(mx.nd.array(v, ctx), ctx)
        # gl_model.initialize()
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        pt_model = PytorchModel()
        # eval() makes BatchNorm use the assigned running statistics.
        pt_model.eval()
        pt_params = pt_model.state_dict()
        pt_params['bn.weight'] = torch.from_numpy(g)
        pt_params['bn.bias'] = torch.from_numpy(b)
        pt_params['bn.running_mean'] = torch.from_numpy(m)
        pt_params['bn.running_var'] = torch.from_numpy(v)
        pt_model.load_state_dict(pt_params)
        pt_model = pt_model.cuda()
        pt_x = Variable(torch.from_numpy(x)).cuda()
        pt_y = pt_model(pt_x).detach().cpu().numpy()
        diff = np.abs(gl_y - pt_y)
        dist = np.sum(diff)
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(pt_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 2,622 | 25.494949 | 77 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2pt_conv2d.py | import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: one 7x7/stride-2 convolution with bias (3 -> 64)."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = mx.gluon.nn.Conv2D(
                channels=64,
                kernel_size=7,
                strides=2,
                padding=3,
                use_bias=True,
                in_channels=3)

    def hybrid_forward(self, F, x):
        return self.conv(x)
class PytorchModel(torch.nn.Module):
    """PyTorch counterpart of GluonModel: one 7x7/stride-2 convolution with bias (3 -> 64)."""

    def __init__(self):
        super(PytorchModel, self).__init__()
        self.conv = torch.nn.Conv2d(
            in_channels=3,
            out_channels=64,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=True)

    def forward(self, x):
        return self.conv(x)
def main():
    """Compare Gluon Conv2D against torch.nn.Conv2d with shared weight and bias.

    Both frameworks use OIHW weight layout, so the tensors are loaded as-is;
    any trial with summed absolute difference above 1e-5 is reported.
    """
    success = True
    for i in range(10):
        # w = np.random.randint(10, size=(64, 3, 7, 7)).astype(np.float32)
        # x = np.random.randint(10, size=(1, 3, 224, 224)).astype(np.float32)
        w = np.random.randn(64, 3, 7, 7).astype(np.float32)
        b = np.random.randn(64, ).astype(np.float32)
        x = np.random.randn(10, 3, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        gl_params['conv.weight']._load_init(mx.nd.array(w, ctx), ctx)
        gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        pt_model = PytorchModel()
        pt_model.eval()
        pt_params = pt_model.state_dict()
        pt_params['conv.weight'] = torch.from_numpy(w)
        pt_params['conv.bias'] = torch.from_numpy(b)
        pt_model.load_state_dict(pt_params)
        pt_model = pt_model.cuda()
        pt_x = Variable(torch.from_numpy(x)).cuda()
        pt_y = pt_model(pt_x).detach().cpu().numpy()
        dist = np.sum(np.abs(gl_y - pt_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 2,352 | 24.576087 | 77 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_conv2d.py | import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: one 7x7/stride-2 convolution with bias (3 -> 64)."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = mx.gluon.nn.Conv2D(
                channels=64,
                kernel_size=7,
                strides=2,
                padding=3,
                use_bias=True,
                in_channels=3)

    def hybrid_forward(self, F, x):
        return self.conv(x)
# def tensorflow_model(x):
#
# padding = 3
# x = tf.pad(x, [[0, 0], [0, 0], [padding, padding], [padding, padding]])
# x = tf.layers.conv2d(
# inputs=x,
# filters=64,
# kernel_size=7,
# strides=2,
# padding='valid',
# data_format='channels_first',
# use_bias=False,
# name='conv')
# return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           groups=1,
           use_bias=True,
           name="conv2d"):
    """
    Convolution 2D layer wrapper (NCHW layout).

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    name : str, default 'conv2d'
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    if groups != 1:
        raise NotImplementedError

    if (padding[0] > 0) or (padding[1] > 0):
        # Pad height by padding[0] on both sides and width by padding[1] on
        # both sides.  The previous spec passed `list(padding)` (i.e.
        # [pad_h, pad_w]) for each spatial dimension, which silently mixed
        # the height pad into the width dimension whenever pad_h != pad_w.
        x = tf.pad(x, [[0, 0], [0, 0],
                       [padding[0], padding[0]],
                       [padding[1], padding[1]]])

    x = tf.layers.conv2d(
        inputs=x,
        filters=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding='valid',
        data_format='channels_first',
        use_bias=use_bias,
        name=name)
    return x
def tensorflow_model(x):
    """TF graph under test: a 7x7/stride-2 convolution (3 -> 64) with bias."""
    return conv2d(
        x=x,
        in_channels=3,
        out_channels=64,
        kernel_size=7,
        strides=2,
        padding=3,
        use_bias=True,
        name="conv")
def main():
    """Compare Gluon Conv2D against the TF1 conv2d wrapper with a shared kernel.

    TF stores kernels as HWIO, Gluon as OIHW, hence the (3, 2, 0, 1)
    transpose before loading the Gluon weights.
    """
    success = True
    for i in range(10):
        # gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
        tf_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
        b = np.random.randn(64, ).astype(np.float32)
        x = np.random.randn(10, 3, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        # HWIO -> OIHW for Gluon.
        gl_w = np.transpose(tf_w, axes=(3, 2, 0, 1))
        gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
        gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224),
            name='xx')
        tf_model = tensorflow_model(xx)
        tf_params = {v.name: v for v in tf.global_variables()}
        with tf.Session() as sess:
            # tf_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
            sess.run(tf_params['conv/kernel:0'].assign(tf_w))
            sess.run(tf_params['conv/bias:0'].assign(b))
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 4,243 | 24.566265 | 77 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf2_conv2d.py | import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
def is_channels_first(data_format):
    """
    Is tested data format channels first (NCHW).

    Parameters:
    ----------
    data_format : str
        The ordering of the dimensions in tensors.

    Returns:
    -------
    bool
        A flag.
    """
    if data_format == "channels_first":
        return True
    return False
class TF2Model(tf.keras.Model):
    """Model under test: explicit tf.pad followed by a valid 7x7/stride-2 conv (3 -> 64)."""

    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        padding = (3, 3)
        if isinstance(padding, int):
            padding = (padding, padding)
        spatial = [list(padding), list(padding)]
        if is_channels_first(data_format):
            self.paddings_tf = [[0, 0], [0, 0]] + spatial
        else:
            self.paddings_tf = [[0, 0]] + spatial + [[0, 0]]
        self.conv = nn.Conv2D(
            filters=64,
            kernel_size=(7, 7),
            strides=2,
            padding="valid",
            data_format=data_format,
            dilation_rate=1,
            use_bias=False,
            name="conv")

    def call(self, x):
        padded = tf.pad(x, paddings=self.paddings_tf)
        return self.conv(padded)
class TF2Model2(tf.keras.Model):
    """Same conv as TF2Model, but padding via a ZeroPadding2D layer instead of tf.pad."""

    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model2, self).__init__(**kwargs)
        padding = (3, 3)
        if isinstance(padding, int):
            padding = (padding, padding)
        self.pad = nn.ZeroPadding2D(
            padding=padding,
            data_format=data_format)
        self.conv = nn.Conv2D(
            filters=64,
            kernel_size=(7, 7),
            strides=2,
            padding="valid",
            data_format=data_format,
            dilation_rate=1,
            use_bias=False,
            name="conv")

    def call(self, x):
        return self.conv(self.pad(x))
def gl_calc(gl_w, x):
    """Run the reference Gluon conv (7x7/stride-2, 3 -> 64, no bias) on *x*.

    Loads the OIHW weight tensor *gl_w* into the layer and returns the
    result as a NumPy array (computed on GPU 0).
    """
    import mxnet as mx

    class GluonModel(mx.gluon.HybridBlock):

        def __init__(self, **kwargs):
            super(GluonModel, self).__init__(**kwargs)
            with self.name_scope():
                self.conv = mx.gluon.nn.Conv2D(
                    channels=64,
                    kernel_size=(7, 7),
                    strides=2,
                    padding=(3, 3),
                    use_bias=False,
                    in_channels=3)

        def hybrid_forward(self, F, x):
            return self.conv(x)

    ctx = mx.gpu(0)
    model = GluonModel()
    params = model._collect_params_with_prefix()
    params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
    gl_y = model(mx.nd.array(x, ctx)).asnumpy()
    return gl_y
def main():
    """Compare the Gluon reference conv (gl_calc) against TF2Model2 on random data.

    Gluon weights are OIHW; the Keras kernel is HWIO, hence the (2, 3, 1, 0)
    transpose before assignment.
    """
    # Let TF grow GPU memory on demand instead of grabbing it all upfront.
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    success = True
    for i in range(10):
        gl_w = np.random.randn(64, 3, 7, 7).astype(np.float32)
        # tf2_w = np.random.randn(7, 7, 3, 64).astype(np.float32)
        b = np.random.randn(64, ).astype(np.float32)
        x = np.random.randn(10, 3, 224, 256).astype(np.float32)
        assert (b is not None)
        data_format = "channels_last"
        # data_format = "channels_first"
        tf2_use_cuda = True
        if not tf2_use_cuda:
            with tf.device("/cpu:0"):
                tf2_model = TF2Model2(data_format=data_format)
        else:
            tf2_model = TF2Model2(data_format=data_format)
        input_shape = (1, 224, 256, 3) if data_format == "channels_last" else (1, 3, 224, 256)
        tf2_model.build(input_shape=input_shape)
        tf2_params = {v.name: v for v in tf2_model.weights}
        # print(tf2_params["conv/kernel:0"].shape)
        # tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
        tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
        tf2_params["conv/kernel:0"].assign(tf2_w)
        # tf2_params["conv/bias:0"].assign(b)
        # NHWC input for channels_last; transpose back for the comparison.
        tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
        tf2_x = tf.convert_to_tensor(tf2_x)
        tf2_y = tf2_model(tf2_x).numpy()
        if data_format == "channels_last":
            tf2_y = tf2_y.transpose((0, 3, 1, 2))
        gl_y = gl_calc(gl_w, x)
        dist = np.sum(np.abs(gl_y - tf2_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == "__main__":
    main()
| 4,851 | 27.209302 | 94 | py |
imgclsmob | imgclsmob-master/tests/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/tests/convert_gl2tf2_avgpool2d.py | import math
import numpy as np
import mxnet as mx
import tensorflow as tf
import tensorflow.keras.layers as nn
class GluonModel(mx.gluon.HybridBlock):
    """Reference model: 3x3 average pooling, stride 2, padding 1, ceil mode, pads counted."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = mx.gluon.nn.AvgPool2D(
                pool_size=3,
                strides=2,
                padding=1,
                ceil_mode=True,
                count_include_pad=True)

    def hybrid_forward(self, F, x):
        return self.pool(x)
def is_channels_first(data_format):
    """Return True for the 'channels_first' (NCHW) tensor layout."""
    return "channels_first" == data_format
class TF2Model(tf.keras.Model):
    """
    Keras emulation of MXNet AvgPool2D with padding and ceil_mode.

    Strategy: explicit tf.pad for the padding, an unstrided AveragePooling2D
    for the window, then a 1x1 pooling layer that only applies the stride.
    For ceil_mode the extra end-of-dimension padding is computed in call()
    from the static input shape.

    NOTE(review): the ceil-mode branch pads *both* sides with the adjusted
    amounts and relies on x.get_shape() being static - assumes fixed input
    sizes; TODO confirm against dynamic-shape inputs.
    """
    def __init__(self,
                 pool_size=3,
                 strides=2,
                 padding=1,
                 ceil_mode=True,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        # Normalize scalar arguments to (height, width) pairs.
        if isinstance(pool_size, int):
            pool_size = (pool_size, pool_size)
        if isinstance(strides, int):
            strides = (strides, strides)
        if isinstance(padding, int):
            padding = (padding, padding)
        # Stride handling is split off into a second pooling layer.
        self.use_stride_pool = (strides[0] > 1) or (strides[1] > 1)
        self.ceil_mode = ceil_mode and self.use_stride_pool
        self.use_pad = (padding[0] > 0) or (padding[1] > 0)
        if self.ceil_mode:
            # Padding must be recomputed per input shape, so defer to call().
            self.padding = padding
            self.pool_size = pool_size
            self.strides = strides
            self.data_format = data_format
        elif self.use_pad:
            # Static padding spec, precomputed once.
            if is_channels_first(data_format):
                self.paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
            else:
                self.paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
        self.pool = nn.AveragePooling2D(
            pool_size=pool_size,
            strides=1,
            padding="valid",
            data_format=data_format,
            name="pool")
        if self.use_stride_pool:
            # 1x1 window: picks every strides-th element, i.e. applies stride only.
            self.stride_pool = nn.AveragePooling2D(
                pool_size=1,
                strides=strides,
                padding="valid",
                data_format=data_format,
                name="stride_pool")
    def call(self, x):
        if self.ceil_mode:
            # Recompute padding so the output size matches MXNet's ceil mode.
            x_shape = x.get_shape().as_list()
            if is_channels_first(self.data_format):
                height = x_shape[2]
                width = x_shape[3]
            else:
                height = x_shape[1]
                width = x_shape[2]
            padding = self.padding
            out_height = float(height + 2 * padding[0] - self.pool_size[0]) / self.strides[0] + 1.0
            out_width = float(width + 2 * padding[1] - self.pool_size[1]) / self.strides[1] + 1.0
            # Fractional output size means ceil mode needs one extra padded row/col.
            if math.ceil(out_height) > math.floor(out_height):
                padding = (padding[0] + 1, padding[1])
            if math.ceil(out_width) > math.floor(out_width):
                padding = (padding[0], padding[1] + 1)
            if (padding[0] > 0) or (padding[1] > 0):
                if is_channels_first(self.data_format):
                    paddings_tf = [[0, 0], [0, 0], list(padding), list(padding)]
                else:
                    paddings_tf = [[0, 0], list(padding), list(padding), [0, 0]]
                x = tf.pad(x, paddings=paddings_tf)
        elif self.use_pad:
            x = tf.pad(x, paddings=self.paddings_tf)
        x = self.pool(x)
        if self.use_stride_pool:
            x = self.stride_pool(x)
        return x
def main():
    """Compare Gluon AvgPool2D (ceil mode) against the TF2 pooling emulation.

    Pooling has no parameters, so only the input tensor is shared; any trial
    with summed absolute difference above 1e-5 is reported.
    """
    success = True
    for i in range(10):
        x = np.random.randn(12, 10, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        data_format = "channels_last"
        # data_format = "channels_first"
        tf2_use_cuda = True
        if not tf2_use_cuda:
            with tf.device("/cpu:0"):
                tf2_model = TF2Model(data_format=data_format)
        else:
            tf2_model = TF2Model(data_format=data_format)
        input_shape = (1, 224, 224, 10) if data_format == "channels_last" else (1, 10, 224, 224)
        tf2_model.build(input_shape=input_shape)
        # tf2_params = {v.name: v for v in tf2_model.weights}
        # NHWC input for channels_last; transpose back for the comparison.
        tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
        tf2_x = tf.convert_to_tensor(tf2_x)
        tf2_y = tf2_model(tf2_x).numpy()
        if data_format == "channels_last":
            tf2_y = tf2_y.transpose((0, 3, 1, 2))
        dist = np.sum(np.abs(gl_y - tf2_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 4,921 | 30.961039 | 99 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2pt_dense.py | import numpy as np
import mxnet as mx
import torch
from torch.autograd import Variable
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: one bias-free fully-connected layer (1024 -> 1000)."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.dense = mx.gluon.nn.Dense(
                units=1000,
                use_bias=False,
                in_units=1024)

    def hybrid_forward(self, F, x):
        return self.dense(x)
class PytorchModel(torch.nn.Module):
    """PyTorch counterpart of GluonModel: one bias-free Linear layer (1024 -> 1000)."""

    def __init__(self):
        super(PytorchModel, self).__init__()
        self.dense = torch.nn.Linear(
            in_features=1024,
            out_features=1000,
            bias=False)

    def forward(self, x):
        return self.dense(x)
def main():
    """Compare Gluon Dense against torch.nn.Linear with a shared (out, in) weight.

    Both frameworks store Linear weights as (out_features, in_features), so
    the matrix is loaded as-is; results are also checked against a float64
    numpy matmul reference.
    """
    success = True
    for i in range(10):
        w = np.random.randn(1000, 1024).astype(np.float32)
        # b = np.random.randn(1000, ).astype(np.float32)
        x = np.random.randn(1, 1024).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        gl_params['dense.weight']._load_init(mx.nd.array(w, ctx), ctx)
        # gl_params['dense.bias']._load_init(mx.nd.array(b, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        pt_model = PytorchModel()
        pt_model.eval()
        pt_params = pt_model.state_dict()
        pt_params['dense.weight'] = torch.from_numpy(w)
        # pt_params['dense.bias'] = torch.from_numpy(b)
        pt_model.load_state_dict(pt_params)
        pt_model = pt_model.cuda()
        pt_x = Variable(torch.from_numpy(x)).cuda()
        pt_y = pt_model(pt_x).detach().cpu().numpy()
        dist = np.sum(np.abs(gl_y - pt_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(pt_y)
        # Cross-check both frameworks against a float64 reference product.
        y = np.matmul(w.astype(np.float64), x[0].astype(np.float64))
        # y = np.dot(w, x[0])
        gl_dist = np.sum(np.abs(gl_y - y))
        pt_dist = np.sum(np.abs(pt_y - y))
        print("i={}, gl_dist={}".format(i, gl_dist))
        print("i={}, pt_dist={}".format(i, pt_dist))
    if success:
        print("All ok.")
if __name__ == '__main__':
    main()
| 2,369 | 25.333333 | 72 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_gconv2d.py | import numpy as np
import mxnet as mx
import tensorflow as tf
GROUPS = 8
class GluonModel(mx.gluon.HybridBlock):
    """Reference Gluon model: a grouped 7x7/stride-2 convolution (128 -> 32, GROUPS groups)."""

    def __init__(self, **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.g_conv = mx.gluon.nn.Conv2D(
                channels=32,
                kernel_size=7,
                strides=2,
                padding=3,
                groups=GROUPS,
                use_bias=False,
                in_channels=128)

    def hybrid_forward(self, F, x):
        return self.g_conv(x)
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           groups=1,
           use_bias=True,
           name="conv2d"):
    """
    Convolution 2D layer wrapper (NCHW) supporting ordinary, depthwise and
    grouped convolution.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector (unsupported for depthwise).
    name : str, default 'conv2d'
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    if (padding[0] > 0) or (padding[1] > 0):
        # Pad height by padding[0] on both sides and width by padding[1] on
        # both sides.  The previous spec passed `list(padding)` (i.e.
        # [pad_h, pad_w]) for each spatial dimension, which silently mixed
        # the height pad into the width dimension whenever pad_h != pad_w.
        x = tf.pad(x, [[0, 0], [0, 0],
                       [padding[0], padding[0]],
                       [padding[1], padding[1]]])

    if groups == 1:
        # Ordinary convolution.
        x = tf.layers.conv2d(
            inputs=x,
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding='valid',
            data_format='channels_first',
            use_bias=use_bias,
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
            name=name)
    elif (groups == out_channels) and (out_channels == in_channels):
        # Depthwise convolution: one filter per input channel.  Fail fast
        # before creating variables (the previous version raised only after
        # the kernel variable had already been added to the graph).
        if use_bias:
            raise NotImplementedError
        kernel = tf.get_variable(
            name=name + '/dw_kernel',
            shape=kernel_size + (in_channels, 1),
            initializer=tf.variance_scaling_initializer(2.0))
        x = tf.nn.depthwise_conv2d(
            input=x,
            filter=kernel,
            strides=(1, 1) + strides,
            padding='VALID',
            rate=(1, 1),
            name=name,
            data_format='NCHW')
    else:
        # Grouped convolution: split channels, convolve each group, concat.
        assert (in_channels % groups == 0)
        assert (out_channels % groups == 0)
        in_group_channels = in_channels // groups
        out_group_channels = out_channels // groups
        group_list = []
        for gi in range(groups):
            xi = x[:, gi * in_group_channels:(gi + 1) * in_group_channels, :, :]
            xi = tf.layers.conv2d(
                inputs=xi,
                filters=out_group_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding='valid',
                data_format='channels_first',
                use_bias=use_bias,
                kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
                name=name + "/convgroup{}".format(gi + 1))
            group_list.append(xi)
        x = tf.concat(group_list, axis=1, name=name + "/concat")
    return x
def tensorflow_model(x):
    """
    Build the TensorFlow reference graph: a single grouped convolution
    (128 -> 32 channels, 7x7 kernel, stride 2, GROUPS groups), mirroring
    the Gluon model used in this comparison script.

    Parameters:
    ----------
    x : Tensor
        Input tensor in NCHW layout with 128 channels.

    Returns:
    -------
    Tensor
        Output of the grouped convolution.
    """
    return conv2d(
        x=x,
        in_channels=128,
        out_channels=32,
        kernel_size=7,
        strides=2,
        padding=3,
        groups=GROUPS,
        use_bias=False,
        name="g_conv")
def main():
    """
    Compare a Gluon grouped convolution against an equivalent TensorFlow
    graph built from per-group conv2d layers, over several random trials.
    Prints per-trial distances on mismatch, or "All ok." on success.
    """
    success = True
    for i in range(10):
        # Shared weight in Gluon's grouped layout (out_channels=32,
        # in_channels_per_group=16) and a random input batch.
        w = np.random.randn(32, 16, 7, 7).astype(np.float32)
        x = np.random.randn(10, 128, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        # Load the shared weight directly into the Gluon parameter.
        gl_params['g_conv.weight']._load_init(mx.nd.array(w, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 128, 224, 224),
            name='xx')
        tf_model = tensorflow_model(xx)
        tf_params = {v.name: v for v in tf.global_variables()}
        with tf.Session() as sess:
            # Split the weight along the output-channel axis into one chunk
            # per group, convert each chunk from Gluon's OIHW layout to TF's
            # HWIO, and assign it to the corresponding per-group conv.
            w_list = np.split(w, axis=0, indices_or_sections=GROUPS)
            for gi in range(GROUPS):
                w_gi = w_list[gi]
                tf_w = np.transpose(w_gi, axes=(2, 3, 1, 0))
                sess.run(tf_params['g_conv/convgroup{}/kernel:0'.format(gi + 1)].assign(tf_w))
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        # Clear the TF1 default graph so each trial builds fresh variables.
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
main()
| 5,334 | 27.37766 | 94 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf2_dwconv2d.py | import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as nn
channels = 12  # channel count shared by the TF2 and Gluon test models below
def is_channels_first(data_format):
    """
    Check whether the given data format places the channel axis first.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.

    Returns:
    -------
    bool
        True for 'channels_first', False otherwise.
    """
    channels_first = (data_format == "channels_first")
    return channels_first
class TF2Model(tf.keras.Model):
    """
    Minimal Keras model wrapping a single depthwise convolution
    (7x7 kernel, stride 2, 'same' padding, no bias) for cross-framework
    comparison against the Gluon reference in `gl_calc`.

    Parameters:
    ----------
    data_format : str, default 'channels_last'
        The ordering of the dimensions in tensors.
    """
    def __init__(self,
                 data_format="channels_last",
                 **kwargs):
        super(TF2Model, self).__init__(**kwargs)
        self.conv = nn.DepthwiseConv2D(
            # filters=channels,
            kernel_size=(7, 7),
            strides=2,
            padding="same",
            data_format=data_format,
            dilation_rate=1,
            use_bias=False,
            name="conv")

    def call(self, x):
        """
        Apply the depthwise convolution to `x`.
        """
        x = self.conv(x)
        return x
def gl_calc(gl_w, x):
    """
    Run the reference Gluon depthwise convolution on input `x` with
    weight `gl_w` and return the result as a NumPy array.

    Parameters:
    ----------
    gl_w : np.array
        Depthwise conv weight in Gluon layout (channels, 1, 7, 7).
    x : np.array
        Input batch in NCHW layout.

    Returns:
    -------
    np.array
        Convolution output in NCHW layout.
    """
    import mxnet as mx

    class GluonModel(mx.gluon.HybridBlock):
        # Gluon counterpart of TF2Model: depthwise convolution expressed as
        # a grouped Conv2D with groups == channels.

        def __init__(self,
                     **kwargs):
            super(GluonModel, self).__init__(**kwargs)
            with self.name_scope():
                self.conv = mx.gluon.nn.Conv2D(
                    channels=channels,
                    kernel_size=(7, 7),
                    strides=2,
                    padding=(3, 3),
                    groups=channels,
                    use_bias=False,
                    in_channels=channels)

        def hybrid_forward(self, F, x):
            x = self.conv(x)
            return x

    gl_model = GluonModel()
    # ctx = mx.cpu()
    ctx = mx.gpu(0)
    gl_params = gl_model._collect_params_with_prefix()
    # gl_w = np.transpose(tf2_w, axes=(3, 2, 0, 1))
    # Load the externally supplied weight into the conv parameter.
    gl_params['conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
    # gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
    gl_x = mx.nd.array(x, ctx)
    gl_y = gl_model(gl_x).asnumpy()
    return gl_y
def main():
    """
    Compare the Keras depthwise convolution (TF2Model) against the Gluon
    reference (gl_calc) on random inputs. Prints per-trial distances on
    mismatch, or "All ok." on success.
    """
    gpus = tf.config.experimental.list_physical_devices("GPU")
    if gpus:
        # Avoid TF grabbing all GPU memory up front.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    success = True
    for i in range(10):
        # Weight in Gluon depthwise layout (channels, 1, kh, kw).
        gl_w = np.random.randn(channels, 1, 7, 7).astype(np.float32)
        # tf2_w = np.random.randn(7, 7, 1, channels).astype(np.float32)
        b = np.random.randn(channels, ).astype(np.float32)
        x = np.random.randn(10, channels, 224, 256).astype(np.float32)
        assert (b is not None)
        data_format = "channels_last"
        # data_format = "channels_first"
        tf2_use_cuda = True
        if not tf2_use_cuda:
            with tf.device("/cpu:0"):
                tf2_model = TF2Model(data_format=data_format)
        else:
            tf2_model = TF2Model(data_format=data_format)
        input_shape = (1, 224, 256, channels) if data_format == "channels_last" else (1, channels, 224, 256)
        tf2_model.build(input_shape=input_shape)
        tf2_params = {v.name: v for v in tf2_model.weights}
        # print(tf2_params["conv/kernel:0"].shape)
        # tf2_w = np.transpose(gl_w, axes=(2, 3, 1, 0))
        # Convert Gluon (C, 1, kh, kw) weight to Keras depthwise (kh, kw, C, 1).
        tf2_w = np.transpose(gl_w, axes=(2, 3, 0, 1))
        tf2_params["conv/depthwise_kernel:0"].assign(tf2_w)
        # tf2_params["conv/bias:0"].assign(b)
        tf2_x = x.transpose((0, 2, 3, 1)) if data_format == "channels_last" else x
        tf2_x = tf.convert_to_tensor(tf2_x)
        tf2_y = tf2_model(tf2_x).numpy()
        if data_format == "channels_last":
            # Bring the output back to NCHW for comparison with Gluon.
            tf2_y = tf2_y.transpose((0, 3, 1, 2))
        gl_y = gl_calc(gl_w, x)
        dist = np.sum(np.abs(gl_y - tf2_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == "__main__":
main()
| 3,875 | 27.291971 | 108 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_avgpool2d.py | # import math
import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
    """
    Reference Gluon model: a single 2x2/stride-2 average pooling layer
    with no padding, used for comparison against the TF graph below.
    """
    def __init__(self,
                 **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.pool = mx.gluon.nn.AvgPool2D(
                pool_size=2,
                strides=2,
                padding=0)

    def hybrid_forward(self, F, x):
        """
        Apply the pooling layer to `x`.
        """
        x = self.pool(x)
        return x
# def avgpool2d(x,
# pool_size,
# strides,
# padding=0,
# ceil_mode=False,
# name=None):
# """
# Average pooling operation for two dimensional (spatial) data.
#
# Parameters:
# ----------
# x : Tensor
# Input tensor.
# pool_size : int or tuple/list of 2 int
# Size of the max pooling windows.
# strides : int or tuple/list of 2 int
# Strides of the pooling.
# padding : int or tuple/list of 2 int, default 0
# Padding value for convolution layer.
# ceil_mode : bool, default False
# When `True`, will use ceil instead of floor to compute the output shape.
# name : str, default 'conv2d'
# Layer name.
#
# Returns:
# -------
# Tensor
# Resulted tensor.
# """
# if isinstance(padding, int):
# padding = (padding, padding)
#
# if ceil_mode:
# height = x.shape[2]
# out_height = float(height + 2 * padding[0] - pool_size[0]) / strides[0] + 1.0
# if math.ceil(out_height) > math.floor(out_height):
# padding[0] += 1
# width = x.shape[3]
# out_width = float(width + 2 * padding[1] - pool_size[1]) / strides[1] + 1.0
# if math.ceil(out_width) > math.floor(out_width):
# padding[1] += 1
#
# if (padding[0] > 0) or (padding[1] > 0):
# x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)], mode="REFLECT")
#
# x = tf.layers.average_pooling2d(
# inputs=x,
# pool_size=pool_size,
# strides=strides,
# padding='valid',
# data_format='channels_first',
# name=name)
# return x
def tensorflow_model(x):
    """
    Build the TensorFlow reference graph: a single 2x2/stride-2 average
    pooling layer in NCHW layout, matching the Gluon model above.

    Parameters:
    ----------
    x : Tensor
        Input tensor in NCHW layout.

    Returns:
    -------
    Tensor
        Pooled tensor.
    """
    return tf.layers.average_pooling2d(
        inputs=x,
        pool_size=2,
        strides=2,
        padding='valid',
        data_format='channels_first',
        name="pool")
def main():
    """
    Compare Gluon AvgPool2D against tf.layers.average_pooling2d on random
    inputs. Prints per-trial distances on mismatch, or "All ok." on success.
    """
    success = True
    for i in range(10):
        x = np.random.randn(10, 10, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 10, 224, 224),
            name='xx')
        tf_model = tensorflow_model(xx)
        with tf.Session() as sess:
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        # Reset the TF1 default graph between trials.
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
main()
| 3,395 | 24.343284 | 87 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_dwconv2d.py | import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
    """
    Reference Gluon model: a depthwise 7x7/stride-2 convolution expressed
    as a grouped Conv2D with groups == channels == 32, no bias.
    """
    def __init__(self,
                 **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.dw_conv = mx.gluon.nn.Conv2D(
                channels=32,
                kernel_size=7,
                strides=2,
                padding=3,
                groups=32,
                use_bias=False,
                in_channels=32)

    def hybrid_forward(self, F, x):
        """
        Apply the depthwise convolution to `x`.
        """
        x = self.dw_conv(x)
        return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           groups=1,
           use_bias=True,
           name="conv2d"):
    """
    Convolution 2D layer wrapper.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    name : str, default 'conv2d'
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.

    Raises:
    ------
    NotImplementedError
        For a depthwise convolution with bias, or for a general grouped
        convolution (only groups == 1 and depthwise cases are implemented).
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)

    # Apply symmetric padding explicitly (NCHW) so both branches below can
    # use 'valid'/'VALID' convolutions.
    if (padding[0] > 0) or (padding[1] > 0):
        x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)])

    if groups == 1:
        x = tf.layers.conv2d(
            inputs=x,
            filters=out_channels,
            kernel_size=kernel_size,
            strides=strides,
            padding='valid',
            data_format='channels_first',
            use_bias=use_bias,
            kernel_initializer=tf.contrib.layers.variance_scaling_initializer(2.0),
            name=name)
    elif (groups == out_channels) and (out_channels == in_channels):
        # Depthwise convolution: one filter per input channel.
        if use_bias:
            # Fail fast: a biased depthwise convolution is not supported,
            # so do not create variables or build the op first.
            raise NotImplementedError
        kernel = tf.get_variable(
            name=name + '/dw_kernel',
            shape=kernel_size + (in_channels, 1),
            initializer=tf.variance_scaling_initializer(2.0))
        x = tf.nn.depthwise_conv2d(
            input=x,
            filter=kernel,
            strides=(1, 1) + strides,
            padding='VALID',
            rate=(1, 1),
            name=name,
            data_format='NCHW')
    else:
        raise NotImplementedError
    return x
def tensorflow_model(x):
    """
    Build the TensorFlow reference graph: a depthwise 7x7/stride-2
    convolution over 32 channels, matching the Gluon model above.

    Parameters:
    ----------
    x : Tensor
        Input tensor in NCHW layout with 32 channels.

    Returns:
    -------
    Tensor
        Convolution output.
    """
    return conv2d(
        x=x,
        in_channels=32,
        out_channels=32,
        kernel_size=7,
        strides=2,
        padding=3,
        groups=32,
        use_bias=False,
        name="dw_conv")
def main():
    """
    Compare the Gluon depthwise convolution against the TF depthwise conv2d
    branch, sharing a weight generated in the TF depthwise layout.
    Prints per-trial distances on mismatch, or "All ok." on success.
    """
    success = True
    for i in range(10):
        # gl_w = np.random.randn(32, 1, 7, 7).astype(np.float32)
        tf_w = np.random.randn(7, 7, 32, 1).astype(np.float32)
        x = np.random.randn(10, 32, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        # Convert TF depthwise layout (kh, kw, C, 1) to Gluon (C, 1, kh, kw).
        gl_w = np.transpose(tf_w, axes=(2, 3, 0, 1))
        gl_params['dw_conv.weight']._load_init(mx.nd.array(gl_w, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 32, 224, 224),
            name='xx')
        tf_model = tensorflow_model(xx)
        tf_params = {v.name: v for v in tf.global_variables()}
        with tf.Session() as sess:
            # tf_w = np.transpose(gl_w, axes=(2, 3, 0, 1))
            sess.run(tf_params['dw_conv/dw_kernel:0'].assign(tf_w))
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        # Reset the TF1 default graph between trials.
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
main()
| 4,434 | 25.716867 | 83 | py |
imgclsmob | imgclsmob-master/tests/convert_gl2tf_conv1x1.py | import numpy as np
import mxnet as mx
import tensorflow as tf
class GluonModel(mx.gluon.HybridBlock):
    """
    Reference Gluon model: a single 7x7/stride-2 convolution
    (3 -> 64 channels) with bias.
    """
    def __init__(self,
                 **kwargs):
        super(GluonModel, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = mx.gluon.nn.Conv2D(
                channels=64,
                kernel_size=7,
                strides=2,
                padding=3,
                use_bias=True,
                in_channels=3)

    def hybrid_forward(self, F, x):
        """
        Apply the convolution to `x`.
        """
        x = self.conv(x)
        return x
# def tensorflow_model(x):
#
# padding = 3
# x = tf.pad(x, [[0, 0], [0, 0], [padding, padding], [padding, padding]])
# x = tf.layers.conv2d(
# inputs=x,
# filters=64,
# kernel_size=7,
# strides=2,
# padding='valid',
# data_format='channels_first',
# use_bias=False,
# name='conv')
# return x
def conv2d(x,
           in_channels,
           out_channels,
           kernel_size,
           strides=1,
           padding=0,
           groups=1,
           use_bias=True,
           name="conv2d"):
    """
    Convolution 2D layer wrapper.

    Parameters:
    ----------
    x : Tensor
        Input tensor.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int or tuple/list of 2 int
        Convolution window size.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    padding : int or tuple/list of 2 int
        Padding value for convolution layer.
    groups : int, default 1
        Number of groups. Only groups == 1 is implemented.
    use_bias : bool, default True
        Whether the layer uses a bias vector.
    name : str, default 'conv2d'
        Layer name.

    Returns:
    -------
    Tensor
        Resulted tensor.

    Raises:
    ------
    NotImplementedError
        If groups != 1.
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if isinstance(strides, int):
        strides = (strides, strides)
    if isinstance(padding, int):
        padding = (padding, padding)
    if groups != 1:
        raise NotImplementedError
    # Apply symmetric padding explicitly (NCHW), then a 'valid' convolution.
    if (padding[0] > 0) or (padding[1] > 0):
        x = tf.pad(x, [[0, 0], [0, 0], list(padding), list(padding)])
    x = tf.layers.conv2d(
        inputs=x,
        filters=out_channels,
        kernel_size=kernel_size,
        strides=strides,
        padding='valid',
        data_format='channels_first',
        use_bias=use_bias,
        name=name)
    return x
def tensorflow_model(x):
    """
    Build the TensorFlow reference graph: a 7x7/stride-2 convolution
    (3 -> 64 channels) with bias, matching the Gluon model above.

    Parameters:
    ----------
    x : Tensor
        Input tensor in NCHW layout with 3 channels.

    Returns:
    -------
    Tensor
        Convolution output.
    """
    return conv2d(
        x=x,
        in_channels=3,
        out_channels=64,
        kernel_size=7,
        strides=2,
        padding=3,
        use_bias=True,
        name="conv")
def main():
    """
    Compare the Gluon convolution against its TF counterpart, sharing
    randomly generated weight and bias tensors. Prints per-trial distances
    on mismatch, or "All ok." on success.
    """
    success = True
    for i in range(10):
        # w = np.random.randint(10, size=(64, 3, 7, 7)).astype(np.float32)
        # x = np.random.randint(10, size=(1, 3, 224, 224)).astype(np.float32)
        w = np.random.randn(64, 3, 7, 7).astype(np.float32)
        b = np.random.randn(64, ).astype(np.float32)
        x = np.random.randn(10, 3, 224, 224).astype(np.float32)
        gl_model = GluonModel()
        # ctx = mx.cpu()
        ctx = mx.gpu(0)
        gl_params = gl_model._collect_params_with_prefix()
        gl_params['conv.weight']._load_init(mx.nd.array(w, ctx), ctx)
        gl_params['conv.bias']._load_init(mx.nd.array(b, ctx), ctx)
        gl_x = mx.nd.array(x, ctx)
        gl_y = gl_model(gl_x).asnumpy()
        xx = tf.placeholder(
            dtype=tf.float32,
            shape=(None, 3, 224, 224),
            name='xx')
        tf_model = tensorflow_model(xx)
        tf_params = {v.name: v for v in tf.global_variables()}
        with tf.Session() as sess:
            # Convert Gluon OIHW weight layout to TF HWIO before assigning.
            tf_w = np.transpose(w, axes=(2, 3, 1, 0))
            sess.run(tf_params['conv/kernel:0'].assign(tf_w))
            sess.run(tf_params['conv/bias:0'].assign(b))
            tf_y = sess.run(tf_model, feed_dict={xx: x})
        # Reset the TF1 default graph between trials.
        tf.reset_default_graph()
        dist = np.sum(np.abs(gl_y - tf_y))
        if dist > 1e-5:
            success = False
            print("i={}, dist={}".format(i, dist))
            # print(gl_y)
            # print(tf_y)
    if success:
        print("All ok.")
if __name__ == '__main__':
main()
| 4,267 | 24.710843 | 77 | py |
imgclsmob | imgclsmob-master/gluon/lr_scheduler.py | from math import pi, cos
from mxnet import lr_scheduler
class LRScheduler(lr_scheduler.LRScheduler):
    """
    Learning Rate Scheduler

    For mode='step', we multiply lr with `step_factor` at each epoch in `step`.

    For mode='poly'::

        lr = targetlr + (baselr - targetlr) * (1 - iter / maxiter) ^ power

    For mode='cosine'::

        lr = targetlr + (baselr - targetlr) * (1 + cos(pi * iter / maxiter)) / 2

    If warmup_epochs > 0, a warmup stage will be inserted before the main lr scheduler.

    For warmup_mode='linear'::

        lr = warmup_lr + (baselr - warmup_lr) * iter / max_warmup_iter

    For warmup_mode='constant'::

        lr = warmup_lr

    Parameters:
    ----------
    mode : str
        Modes for learning rate scheduler. Currently it supports 'step', 'poly' and 'cosine'.
    base_lr : float
        Base learning rate, i.e. the starting learning rate.
    n_iters : int
        Number of iterations in each epoch.
    n_epochs : int
        Number of training epochs.
    step : list
        A list of epochs to decay the learning rate.
    step_factor : float
        Learning rate decay factor.
    target_lr : float
        Target learning rate for poly and cosine, as the ending learning rate.
    power : float
        Power of poly function.
    warmup_epochs : int
        Number of epochs for the warmup stage.
    warmup_lr : float
        The base learning rate for the warmup stage.
    warmup_mode : str
        Modes for the warmup stage. It supports 'constant', 'linear', 'poly' and 'cosine'.
    """
    def __init__(self,
                 mode,
                 base_lr,
                 n_iters,
                 n_epochs,
                 step=(30, 60, 90),
                 step_factor=0.1,
                 target_lr=0,
                 power=0.9,
                 warmup_epochs=0,
                 warmup_lr=0,
                 warmup_mode="linear"):
        super(LRScheduler, self).__init__(base_lr=base_lr)
        assert(mode in ["step", "poly", "cosine"])
        assert(warmup_mode in ["constant", "linear", "poly", "cosine"])
        self.mode = mode
        self.learning_rate = self.base_lr
        self.n_iters = n_iters
        self.step = step
        self.step_factor = step_factor
        self.target_lr = target_lr
        self.power = power
        self.warmup_epochs = warmup_epochs
        self.warmup_lr = warmup_lr
        self.warmup_mode = warmup_mode
        # Total number of iterations, and of those spent in warm-up.
        self.N = n_epochs * n_iters
        self.warmup_N = warmup_epochs * n_iters

    def __call__(self, num_update):
        """
        Return the current learning rate. `num_update` is ignored: the rate
        is advanced explicitly via `update()` from the training loop.
        """
        return self.learning_rate

    def update(self, i, epoch):
        """
        Recompute the learning rate for iteration `i` of epoch `epoch`.

        Parameters:
        ----------
        i : int
            Iteration index within the epoch.
        epoch : int
            Epoch index.
        """
        t = epoch * self.n_iters + i
        assert (t >= 0) and (t <= self.N)
        t = float(t)
        if epoch < self.warmup_epochs:
            # Warm-up Stage
            if self.warmup_mode == "constant":
                self.learning_rate = self.warmup_lr
            else:
                # Interpolate from warmup_lr towards base_lr over warmup_N
                # iterations, shaped by the selected warm-up mode.
                base_lr_real = self.base_lr - self.warmup_lr
                t_rel = t / self.warmup_N
                if self.warmup_mode == "linear":
                    self.learning_rate = self.warmup_lr + base_lr_real * t_rel
                elif self.warmup_mode == "poly":
                    self.learning_rate = self.warmup_lr + base_lr_real * pow(t_rel, self.power)
                elif self.warmup_mode == "cosine":
                    self.learning_rate = self.warmup_lr + base_lr_real * 0.5 * (1.0 + cos(pi + pi * t_rel))
                else:
                    raise NotImplementedError
        else:
            if self.mode == "step":
                # Apply the decay factor once for every milestone passed.
                count = sum([1 for s in self.step if s <= epoch])
                self.learning_rate = self.base_lr * pow(self.step_factor, count)
            else:
                # Anneal from base_lr towards target_lr over the remaining
                # (post-warm-up) iterations.
                base_lr_real = self.base_lr - self.target_lr
                t_rel = (t - self.warmup_N) / (self.N - self.warmup_N)
                if self.mode == "poly":
                    self.learning_rate = self.target_lr + base_lr_real * pow(1 - t_rel, self.power)
                elif self.mode == "cosine":
                    self.learning_rate = self.target_lr + base_lr_real * (1 + cos(pi * t_rel)) / 2
                else:
                    raise NotImplementedError
| 4,213 | 33.260163 | 107 | py |
imgclsmob | imgclsmob-master/gluon/losses.py | """
Loss functions.
"""
__all__ = ['SegSoftmaxCrossEntropyLoss', 'MixSoftmaxCrossEntropyLoss']
from mxnet.gluon.loss import Loss, _reshape_like
class SegSoftmaxCrossEntropyLoss(Loss):
    """
    SoftmaxCrossEntropyLoss with ignore labels (for segmentation task).

    Parameters:
    ----------
    sparse_label : bool, default True
        Whether label is an integer array instead of probability distribution.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    ignore_label : int, default -1
        The label to ignore.
    size_average : bool, default True
        Whether to re-scale loss with regard to ignored labels.
    """
    def __init__(self,
                 sparse_label=True,
                 batch_axis=0,
                 ignore_label=-1,
                 size_average=True,
                 **kwargs):
        super(SegSoftmaxCrossEntropyLoss, self).__init__(None, batch_axis, **kwargs)
        self._sparse_label = sparse_label
        self._ignore_label = ignore_label
        self._size_average = size_average

    def hybrid_forward(self, F, pred, label):
        """
        Compute loss.

        Parameters:
        ----------
        F : namespace
            `mx.nd` or `mx.sym`.
        pred : NDArray or Symbol
            Unnormalized class scores.
        label : NDArray or Symbol
            Ground-truth labels.

        Returns:
        -------
        NDArray or Symbol
            Loss averaged over all axes except the batch axis.
        """
        # SoftmaxOutput computes the softmax while masking entries equal to
        # `ignore_label`; normalization 'valid' rescales by the number of
        # non-ignored entries.
        softmaxout = F.SoftmaxOutput(
            pred,
            label.astype(pred.dtype),
            ignore_label=self._ignore_label,
            multi_output=self._sparse_label,
            use_ignore=True,
            normalization=("valid" if self._size_average else "null"))
        if self._sparse_label:
            # Pick the log-probability of the ground-truth class per pixel.
            loss = -F.pick(F.log(softmaxout), label, axis=1, keepdims=True)
        else:
            label = _reshape_like(F, label, pred)
            loss = -F.sum(F.log(softmaxout) * label, axis=-1, keepdims=True)
        # Zero out the loss at ignored positions.
        loss = F.where(label.expand_dims(axis=1) == self._ignore_label, F.zeros_like(loss), loss)
        return F.mean(loss, axis=self._batch_axis, exclude=True)
class MixSoftmaxCrossEntropyLoss(SegSoftmaxCrossEntropyLoss):
    """
    SegSoftmaxCrossEntropyLoss with auxiliary loss support.

    Parameters:
    ----------
    aux : bool, default True
        Whether to use auxiliary loss.
    aux_weight : float, default 0.2
        The weight for aux loss.
    ignore_label : int, default -1
        The label to ignore.
    """
    def __init__(self,
                 aux=True,
                 aux_weight=0.2,
                 ignore_label=-1,
                 **kwargs):
        super(MixSoftmaxCrossEntropyLoss, self).__init__(ignore_label=ignore_label, **kwargs)
        self.aux = aux
        self.aux_weight = aux_weight

    def _aux_forward(self, F, pred1, pred2, label):
        """
        Combine the main and auxiliary predictions into one weighted loss.
        """
        base = super(MixSoftmaxCrossEntropyLoss, self)
        main_loss = base.hybrid_forward(F, pred1, label)
        aux_loss = base.hybrid_forward(F, pred2, label)
        return main_loss + self.aux_weight * aux_loss

    def hybrid_forward(self, F, preds, label, **kwargs):
        """
        Compute loss (with the auxiliary branch when enabled).
        """
        if not self.aux:
            return super(MixSoftmaxCrossEntropyLoss, self).hybrid_forward(F, preds, label)
        return self._aux_forward(F, preds[0], preds[1], label)
| 3,478 | 33.79 | 102 | py |
imgclsmob | imgclsmob-master/gluon/weighted_random_sampler.py | """
Dataset weighted random sampler.
"""
__all__ = ['WeightedRandomSampler']
import numpy as np
import mxnet as mx
from mxnet.gluon.data import Sampler
class WeightedRandomSampler(Sampler):
    """
    Samples elements from [0, length) randomly without replacement.

    NOTE(review): `mx.nd.random.multinomial` draws with replacement by
    default, which looks inconsistent with the "without replacement" claim
    above — confirm the intended semantics.

    Parameters:
    ----------
    length : int
        Length of the sequence.
    weights : np.array of float
        Normalized weights of samples (must sum to 1).
    """
    def __init__(self,
                 length,
                 weights):
        assert (isinstance(length, int) and length > 0)
        assert (len(weights) == length)
        # Weights must already form a probability distribution.
        assert (np.abs(weights.sum() - 1.0) <= 1e-5)
        self._length = length
        self._weights = weights.copy()

    def __iter__(self):
        """
        Draw `length` indices from the multinomial distribution defined by
        the weights, shuffle them, and iterate.
        """
        indices = mx.nd.random.multinomial(mx.nd.array(self._weights), shape=self._length).asnumpy()
        np.random.shuffle(indices)
        return iter(indices)

    def __len__(self):
        return self._length
| 969 | 23.871795 | 100 | py |
imgclsmob | imgclsmob-master/gluon/dataset_utils.py | """
Dataset routines.
"""
__all__ = ['get_dataset_metainfo', 'get_train_data_source', 'get_val_data_source', 'get_test_data_source',
'get_batch_fn']
from .datasets.imagenet1k_cls_dataset import ImageNet1KMetaInfo
from .datasets.imagenet1k_rec_cls_dataset import ImageNet1KRecMetaInfo
from .datasets.cub200_2011_cls_dataset import CUB200MetaInfo
from .datasets.cifar10_cls_dataset import CIFAR10MetaInfo
from .datasets.cifar100_cls_dataset import CIFAR100MetaInfo
from .datasets.svhn_cls_dataset import SVHNMetaInfo
from .datasets.voc_seg_dataset import VOCMetaInfo
from .datasets.ade20k_seg_dataset import ADE20KMetaInfo
from .datasets.cityscapes_seg_dataset import CityscapesMetaInfo
from .datasets.coco_seg_dataset import CocoSegMetaInfo
from .datasets.coco_det_dataset import CocoDetMetaInfo
from .datasets.widerface_det_dataset import WiderfaceDetMetaInfo
from .datasets.coco_hpe1_dataset import CocoHpe1MetaInfo
from .datasets.coco_hpe2_dataset import CocoHpe2MetaInfo
from .datasets.coco_hpe3_dataset import CocoHpe3MetaInfo
from .datasets.hpatches_mch_dataset import HPatchesMetaInfo
from .datasets.librispeech_asr_dataset import LibriSpeechMetaInfo
from .datasets.mcv_asr_dataset import McvMetaInfo
from .weighted_random_sampler import WeightedRandomSampler
from mxnet.gluon.data import DataLoader
from mxnet.gluon.utils import split_and_load
def get_dataset_metainfo(dataset_name):
    """
    Get dataset metainfo by name of dataset.

    Parameters:
    ----------
    dataset_name : str
        Dataset name.

    Returns:
    -------
    DatasetMetaInfo
        Dataset metainfo.
    """
    dataset_metainfo_map = {
        "ImageNet1K": ImageNet1KMetaInfo,
        "ImageNet1K_rec": ImageNet1KRecMetaInfo,
        "CUB200_2011": CUB200MetaInfo,
        "CIFAR10": CIFAR10MetaInfo,
        "CIFAR100": CIFAR100MetaInfo,
        "SVHN": SVHNMetaInfo,
        "VOC": VOCMetaInfo,
        "ADE20K": ADE20KMetaInfo,
        "Cityscapes": CityscapesMetaInfo,
        "CocoSeg": CocoSegMetaInfo,
        "CocoDet": CocoDetMetaInfo,
        "WiderFace": WiderfaceDetMetaInfo,
        "CocoHpe1": CocoHpe1MetaInfo,
        "CocoHpe2": CocoHpe2MetaInfo,
        "CocoHpe3": CocoHpe3MetaInfo,
        "HPatches": HPatchesMetaInfo,
        "LibriSpeech": LibriSpeechMetaInfo,
        "MCV": McvMetaInfo,
    }
    # Fail fast on unknown dataset names.
    if dataset_name not in dataset_metainfo_map:
        raise Exception("Unrecognized dataset: {}".format(dataset_name))
    return dataset_metainfo_map[dataset_name]()
def get_train_data_source(ds_metainfo,
                          batch_size,
                          num_workers):
    """
    Get data source for training subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers.

    Returns:
    -------
    DataLoader or ImageRecordIter
        Data source.
    """
    if ds_metainfo.use_imgrec:
        # RecordIO-backed datasets provide their own iterator factory.
        return ds_metainfo.train_imgrec_iter(
            ds_metainfo=ds_metainfo,
            batch_size=batch_size,
            num_workers=num_workers)
    else:
        transform_train = ds_metainfo.train_transform(ds_metainfo=ds_metainfo)
        kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
        dataset = ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="train",
            transform=(transform_train if ds_metainfo.do_transform else None),
            **kwargs)
        if not ds_metainfo.do_transform:
            # Dataset did not apply the transform itself: attach it either
            # to the first element of each item or to the whole item.
            if ds_metainfo.do_transform_first:
                dataset = dataset.transform_first(fn=transform_train)
            else:
                dataset = dataset.transform(fn=transform_train)
        ds_metainfo.update_from_dataset(dataset)
        if not ds_metainfo.train_use_weighted_sampler:
            return DataLoader(
                dataset=dataset,
                batch_size=batch_size,
                shuffle=True,
                last_batch="discard",
                num_workers=num_workers)
        else:
            # Weighted sampling replaces shuffling; the wrapped dataset is
            # expected to expose per-sample weights via `_data.sample_weights`.
            sampler = WeightedRandomSampler(
                length=len(dataset),
                weights=dataset._data.sample_weights)
            return DataLoader(
                dataset=dataset,
                batch_size=batch_size,
                # shuffle=True,
                sampler=sampler,
                last_batch="discard",
                batchify_fn=ds_metainfo.batchify_fn,
                num_workers=num_workers)
def get_val_data_source(ds_metainfo,
                        batch_size,
                        num_workers):
    """
    Get data source for validation subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers.

    Returns:
    -------
    DataLoader or ImageRecordIter
        Data source.
    """
    if ds_metainfo.use_imgrec:
        # RecordIO-backed datasets provide their own iterator factory.
        return ds_metainfo.val_imgrec_iter(
            ds_metainfo=ds_metainfo,
            batch_size=batch_size,
            num_workers=num_workers)
    else:
        transform_val = ds_metainfo.val_transform(ds_metainfo=ds_metainfo)
        kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
        dataset = ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="val",
            transform=(transform_val if ds_metainfo.do_transform else None),
            **kwargs)
        if not ds_metainfo.do_transform:
            # Dataset did not apply the transform itself: attach it either
            # to the first element of each item or to the whole item.
            if ds_metainfo.do_transform_first:
                dataset = dataset.transform_first(fn=transform_val)
            else:
                dataset = dataset.transform(fn=transform_val)
        ds_metainfo.update_from_dataset(dataset)
        return DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            # `last_batch` takes the loader's last-batch policy from the
            # metainfo (as in get_test_data_source); the batchify function
            # belongs only in `batchify_fn`.
            last_batch=ds_metainfo.last_batch,
            batchify_fn=ds_metainfo.batchify_fn,
            num_workers=num_workers)
def get_test_data_source(ds_metainfo,
                         batch_size,
                         num_workers):
    """
    Get data source for testing subset.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.
    batch_size : int
        Batch size.
    num_workers : int
        Number of background workers.

    Returns:
    -------
    DataLoader or ImageRecordIter
        Data source.
    """
    if ds_metainfo.use_imgrec:
        # NOTE(review): reuses the validation imgrec iterator for testing —
        # there is no separate test record iterator; confirm this is intended.
        return ds_metainfo.val_imgrec_iter(
            ds_metainfo=ds_metainfo,
            batch_size=batch_size,
            num_workers=num_workers)
    else:
        transform_test = ds_metainfo.test_transform(ds_metainfo=ds_metainfo)
        kwargs = ds_metainfo.dataset_class_extra_kwargs if ds_metainfo.dataset_class_extra_kwargs is not None else {}
        dataset = ds_metainfo.dataset_class(
            root=ds_metainfo.root_dir_path,
            mode="test",
            transform=(transform_test if ds_metainfo.do_transform else None),
            **kwargs)
        if not ds_metainfo.do_transform:
            # Dataset did not apply the transform itself: attach it either
            # to the first element of each item or to the whole item.
            if ds_metainfo.do_transform_first:
                dataset = dataset.transform_first(fn=transform_test)
            else:
                dataset = dataset.transform(fn=transform_test)
        ds_metainfo.update_from_dataset(dataset)
        return DataLoader(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=False,
            last_batch=ds_metainfo.last_batch,
            batchify_fn=ds_metainfo.batchify_fn,
            num_workers=num_workers)
def get_batch_fn(ds_metainfo):
    """
    Get function for splitting data after extraction from data loader.

    Parameters:
    ----------
    ds_metainfo : DatasetMetaInfo
        Dataset metainfo.

    Returns:
    -------
    func
        Desired function.
    """
    if ds_metainfo.ml_type == "asr":
        def batch_fn(batch, ctx):
            # ASR batches are structured as ((data, data_len), label).
            data = split_and_load(batch[0][0], ctx_list=ctx, batch_axis=0)
            data2 = split_and_load(batch[0][1], ctx_list=ctx, batch_axis=0)
            label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
            return data, data2, label
    elif ds_metainfo.use_imgrec:
        def batch_fn(batch, ctx):
            # ImageRecordIter batches expose `.data` / `.label` lists.
            data = split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            label = split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
            return data, label
    else:
        def batch_fn(batch, ctx):
            # Plain DataLoader batches are (data, label) pairs.
            data = split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
            label = split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
            return data, label
    return batch_fn
| 9,354 | 33.648148 | 117 | py |
imgclsmob | imgclsmob-master/gluon/model_stats.py | """
Routines for model statistics calculation.
"""
import logging
import numpy as np
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.contrib.nn import Identity, PixelShuffle2D
from .gluoncv2.models.common import ReLU6, ChannelShuffle, ChannelShuffle2, PReLU2, HSigmoid, HSwish,\
InterpolationBlock, HeatmapMaxDetBlock
from .gluoncv2.models.fishnet import ChannelSqueeze
from .gluoncv2.models.irevnet import IRevDownscale, IRevSplitBlock, IRevMergeBlock
from .gluoncv2.models.rir_cifar import RiRFinalBlock
from .gluoncv2.models.proxylessnas import ProxylessUnit
from .gluoncv2.models.lwopenpose_cmupan import LwopDecoderFinalBlock
from .gluoncv2.models.centernet import CenterNetHeatmapMaxDet
from .gluoncv2.models.danet import ScaleBlock
from .gluoncv2.models.jasper import MaskConv1d, NemoMelSpecExtractor
__all__ = ['measure_model']
def calc_block_num_params2(net):
    """
    Calculate number of trainable parameters in the block (not iterative).

    Parameters:
    ----------
    net : Block
        Model/block.

    Returns:
    -------
    int
        Number of parameters.
    """
    # Count only differentiable parameters with a known shape.
    return sum(
        np.prod(param.shape)
        for param in net.collect_params().values()
        if (param.shape is not None) and param._differentiable)
def calc_block_num_params(block):
    """
    Calculate number of trainable parameters in the block (iterative).

    Parameters:
    ----------
    block : Block
        Model/block.

    Returns:
    -------
    int
        Number of parameters.
    """
    # Count only differentiable parameters with a known shape.
    return sum(
        np.prod(param.shape)
        for param in block.params.values()
        if (param.shape is not None) and param._differentiable)
def measure_model(model,
                  in_shapes,
                  ctx=mx.cpu()):
    """
    Calculate model statistics.
    Parameters:
    ----------
    model : HybridBlock
        Tested model.
    in_shapes : list of tuple of ints
        Shapes of the input tensors.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    Returns:
    -------
    tuple of (int, int, int)
        Numbers of FLOPs, MACs and trainable parameters accumulated over all leaf blocks.
    """
    # Module-level accumulators: the forward hooks below update these while a
    # dummy forward pass runs through the model.
    global num_flops
    global num_macs
    global num_params
    global names
    num_flops = 0
    num_macs = 0
    num_params = 0
    names = {}
    def call_hook(block, x, y):
        # Forward hook executed on every leaf block: estimates the block's
        # FLOPs/MACs from its inputs `x` and output `y`, then accumulates them.
        # The excluded block types legitimately take multiple inputs/outputs,
        # so the single-input/leaf sanity check is skipped for them.
        if not (isinstance(block, IRevSplitBlock) or isinstance(block, IRevMergeBlock) or
                isinstance(block, RiRFinalBlock) or isinstance(block, InterpolationBlock) or
                isinstance(block, MaskConv1d) or isinstance(block, NemoMelSpecExtractor)):
            assert (len(x) == 1)
            assert (len(block._children) == 0)
        if isinstance(block, nn.Dense):
            # Dense: MACs = in*out; FLOPs also count the additions (bias adds one per output).
            batch = x[0].shape[0]
            in_units = block._in_units
            out_units = block._units
            extra_num_macs = in_units * out_units
            if block.bias is None:
                extra_num_flops = (2 * in_units - 1) * out_units
            else:
                extra_num_flops = 2 * in_units * out_units
            extra_num_flops *= batch
            extra_num_macs *= batch
        elif isinstance(block, nn.Activation):
            # Elementwise activations: a small per-element FLOP cost, no MACs.
            if block._act_type == "relu":
                extra_num_flops = x[0].size
                extra_num_macs = 0
            elif block._act_type == "sigmoid":
                extra_num_flops = 4 * x[0].size
                extra_num_macs = 0
            else:
                raise TypeError("Unknown activation type: {}".format(block._act_type))
        elif isinstance(block, nn.ELU):
            extra_num_flops = 3 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, nn.LeakyReLU):
            extra_num_flops = 2 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, ReLU6):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, PReLU2):
            extra_num_flops = 3 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, nn.Swish):
            extra_num_flops = 5 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, HSigmoid):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, HSwish):
            extra_num_flops = 2 * x[0].size
            extra_num_macs = 0
        elif type(block) in [nn.Conv2DTranspose]:
            # NOTE(review): Conv2DTranspose is charged 4 FLOPs/element like a
            # cheap elementwise op; the convolution arithmetic itself is not
            # counted here -- verify whether this is intentional.
            extra_num_flops = 4 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, nn.Conv2D):
            # Conv2D: recompute the output spatial size from the conv
            # hyper-parameters and cross-check it against the actual output `y`.
            batch = x[0].shape[0]
            x_h = x[0].shape[2]
            x_w = x[0].shape[3]
            kernel_size = block._kwargs["kernel"]
            strides = block._kwargs["stride"]
            dilation = block._kwargs["dilate"]
            padding = block._kwargs["pad"]
            groups = block._kwargs["num_group"]
            in_channels = block._in_channels
            out_channels = block._channels
            y_h = (x_h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // strides[0] + 1
            y_w = (x_w + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // strides[1] + 1
            assert (out_channels == y.shape[1])
            assert (y_h == y.shape[2])
            assert (y_w == y.shape[3])
            kernel_total_size = kernel_size[0] * kernel_size[1]
            y_size = y_h * y_w
            extra_num_macs = kernel_total_size * in_channels * y_size * out_channels // groups
            if block.bias is None:
                extra_num_flops = (2 * kernel_total_size * y_size - 1) * in_channels * out_channels // groups
            else:
                extra_num_flops = 2 * kernel_total_size * in_channels * y_size * out_channels // groups
            extra_num_flops *= batch
            extra_num_macs *= batch
        elif isinstance(block, nn.BatchNorm):
            extra_num_flops = 4 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, nn.InstanceNorm):
            extra_num_flops = 4 * x[0].size
            extra_num_macs = 0
        elif type(block) in [nn.MaxPool2D, nn.AvgPool2D, nn.GlobalAvgPool2D, nn.GlobalMaxPool2D]:
            # Pooling: one op per kernel element per output value; channels preserved.
            batch = x[0].shape[0]
            assert (x[0].shape[1] == y.shape[1])
            pool_size = block._kwargs["kernel"]
            y_h = y.shape[2]
            y_w = y.shape[3]
            channels = x[0].shape[1]
            y_size = y_h * y_w
            pool_total_size = pool_size[0] * pool_size[1]
            extra_num_flops = channels * y_size * pool_total_size
            extra_num_macs = 0
            extra_num_flops *= batch
            extra_num_macs *= batch
        elif isinstance(block, nn.Dropout):
            extra_num_flops = 0
            extra_num_macs = 0
        elif type(block) in [nn.Flatten]:
            extra_num_flops = 0
            extra_num_macs = 0
        elif isinstance(block, nn.HybridSequential):
            # Only empty sequential containers may reach a hook (leaf blocks only).
            assert (len(block._children) == 0)
            extra_num_flops = 0
            extra_num_macs = 0
        elif type(block) in [ChannelShuffle, ChannelShuffle2]:
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, Identity):
            extra_num_flops = 0
            extra_num_macs = 0
        elif isinstance(block, PixelShuffle2D):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, ChannelSqueeze):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, IRevDownscale):
            extra_num_flops = 5 * x[0].size
            extra_num_macs = 0
        elif isinstance(block, IRevSplitBlock):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, IRevMergeBlock):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, RiRFinalBlock):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif isinstance(block, ProxylessUnit):
            extra_num_flops = x[0].size
            extra_num_macs = 0
        elif type(block) in [MaskConv1d, nn.Conv1D]:
            # 1D convolution; MaskConv1d may return a tuple, so unwrap the output first.
            if isinstance(y, tuple):
                assert isinstance(block, MaskConv1d)
                y = y[0]
            batch = x[0].shape[0]
            x_h = x[0].shape[2]
            kernel_size = block._kwargs["kernel"]
            strides = block._kwargs["stride"]
            dilation = block._kwargs["dilate"]
            padding = block._kwargs["pad"]
            groups = block._kwargs["num_group"]
            in_channels = block._in_channels
            out_channels = block._channels
            y_h = (x_h + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // strides[0] + 1
            assert (out_channels == y.shape[1])
            assert (y_h == y.shape[2])
            kernel_total_size = kernel_size[0]
            y_size = y_h
            extra_num_macs = kernel_total_size * in_channels * y_size * out_channels // groups
            if block.bias is None:
                extra_num_flops = (2 * kernel_total_size * y_size - 1) * in_channels * out_channels // groups
            else:
                extra_num_flops = 2 * kernel_total_size * in_channels * y_size * out_channels // groups
            extra_num_flops *= batch
            extra_num_macs *= batch
        elif type(block) in [InterpolationBlock, HeatmapMaxDetBlock, CenterNetHeatmapMaxDet, ScaleBlock,
                             NemoMelSpecExtractor]:
            # These blocks know how to report their own cost.
            extra_num_flops, extra_num_macs = block.calc_flops(x[0])
        elif isinstance(block, LwopDecoderFinalBlock):
            if not block.calc_3d_features:
                extra_num_flops = 0
                extra_num_macs = 0
            else:
                raise TypeError("LwopDecoderFinalBlock!")
        else:
            raise TypeError("Unknown layer type: {}".format(type(block)))
        global num_flops
        global num_macs
        global num_params
        global names
        num_flops += extra_num_flops
        num_macs += extra_num_macs
        # Count each named block's parameters only once, even when the block is
        # invoked several times during the forward pass (e.g. weight sharing).
        if block.name not in names:
            names[block.name] = 1
            num_params += calc_block_num_params(block)
    def register_forward_hooks(a_block):
        # Recursively attach `call_hook` to every leaf block; container blocks
        # are expected to carry no parameters of their own.
        if len(a_block._children) > 0:
            assert (calc_block_num_params(a_block) == 0)
            children_handles = []
            for child_block in a_block._children.values():
                child_handles = register_forward_hooks(child_block)
                children_handles += child_handles
            return children_handles
        else:
            handle = a_block.register_forward_hook(call_hook)
            return [handle]
    hook_handles = register_forward_hooks(model)
    # Drive one dummy forward pass with zero tensors so that every hook fires.
    if len(in_shapes) == 1:
        x = mx.nd.zeros(in_shapes[0], ctx=ctx)
        model(x)
    elif len(in_shapes) == 2:
        x1 = mx.nd.zeros(in_shapes[0], ctx=ctx)
        x2 = mx.nd.zeros(in_shapes[1], ctx=ctx)
        model(x1, x2)
    else:
        raise NotImplementedError()
    # Cross-check per-leaf parameter count against the direct (whole-net) count.
    num_params1 = calc_block_num_params2(model)
    if num_params != num_params1:
        logging.warning(
            "Calculated numbers of parameters are different: standard method: {},\tper-leaf method: {}".format(
                num_params1, num_params))
    # Detach the hooks (list comprehension used purely for its side effect).
    [h.detach() for h in hook_handles]
    return num_flops, num_macs, num_params1
| 11,415 | 36.552632 | 111 | py |
imgclsmob | imgclsmob-master/gluon/setup.py | from setuptools import setup, find_packages
from os import path
from io import open
# Resolve paths relative to this setup.py.
here = path.abspath(path.dirname(__file__))
# Use the README as the PyPI long description.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Distribution metadata for the `gluoncv2` package.
setup(
    name='gluoncv2',
    version='0.0.64',
    description='Image classification and segmentation models for Gluon',
    license='MIT',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/osmr/imgclsmob',
    author='Oleg Sémery',
    author_email='osemery@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Image Recognition',
    ],
    keywords='machine-learning deep-learning neuralnetwork image-classification mxnet gluon imagenet cifar svhn vgg '
             'resnet pyramidnet diracnet densenet condensenet wrn drn dpn darknet fishnet espnetv2 hrnet xdensnet '
             'squeezenet squeezenext shufflenet menet mobilenet igcv3 mnasnet darts xception inception polynet nasnet '
             'pnasnet ror proxylessnas dianet efficientnet mixnet image-segmentation voc ade20k cityscapes coco pspnet '
             'deeplabv3 fcn',
    # Exclude auxiliary/experimental sub-packages from the distribution.
    packages=find_packages(exclude=['datasets', 'metrics', 'others', '*.others', 'others.*', '*.others.*']),
    include_package_data=True,
    install_requires=['numpy'],
)
| 1,566 | 42.527778 | 120 | py |
imgclsmob | imgclsmob-master/gluon/utils.py | """
Main routines shared between training and evaluation scripts.
"""
__all__ = ['prepare_mx_context', 'get_initializer', 'prepare_model', 'calc_net_weight_count', 'validate',
'validate_asr', 'report_accuracy', 'get_composite_metric', 'get_metric_name', 'get_loss']
import os
import re
import logging
import numpy as np
import mxnet as mx
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss
from .gluoncv2.model_provider import get_model
from .metrics.cls_metrics import Top1Error, TopKError
from .metrics.seg_metrics import PixelAccuracyMetric, MeanIoUMetric
from .metrics.det_metrics import CocoDetMApMetric, VOC07MApMetric, WiderfaceDetMetric
from .metrics.hpe_metrics import CocoHpeOksApMetric
from .metrics.asr_metrics import WER
from .losses import SegSoftmaxCrossEntropyLoss, MixSoftmaxCrossEntropyLoss
def prepare_mx_context(num_gpus,
                       batch_size):
    """
    Prepare MXNet context and correct batch size.
    Parameters:
    ----------
    num_gpus : int
        Number of GPU.
    batch_size : int
        Batch size for each GPU.
    Returns:
    -------
    Context
        MXNet context.
    int
        Batch size for all GPUs.
    """
    if num_gpus > 0:
        ctx = [mx.gpu(i) for i in range(num_gpus)]
    else:
        ctx = [mx.cpu()]
    # Scale the per-device batch size by the number of devices (at least one).
    total_batch_size = batch_size * max(num_gpus, 1)
    return ctx, total_batch_size
def get_initializer(initializer_name):
    """
    Get initializer by name.
    Parameters:
    ----------
    initializer_name : str
        Initializer name.
    Returns:
    -------
    Initializer
        Initializer.
    """
    # Lazily construct the initializer; unknown names yield None.
    factories = {
        "MSRAPrelu": lambda: mx.init.MSRAPrelu(),
        "Xavier": lambda: mx.init.Xavier(),
        "Xavier-gaussian-out-2": lambda: mx.init.Xavier(
            rnd_type="gaussian",
            factor_type="out",
            magnitude=2),
    }
    factory = factories.get(initializer_name)
    return factory() if factory is not None else None
def prepare_model(model_name,
                  use_pretrained,
                  pretrained_model_file_path,
                  dtype,
                  net_extra_kwargs=None,
                  load_ignore_extra=False,
                  tune_layers=None,
                  classes=None,
                  in_channels=None,
                  do_hybridize=True,
                  initializer=mx.init.MSRAPrelu(),
                  ctx=mx.cpu()):
    """
    Create and initialize model by name.
    Parameters:
    ----------
    model_name : str
        Model name.
    use_pretrained : bool
        Whether to use pretrained weights.
    pretrained_model_file_path : str
        Path to file with pretrained weights.
    dtype : str
        Base data type for tensors.
    net_extra_kwargs : dict, default None
        Extra parameters for model.
    load_ignore_extra : bool, default False
        Whether to ignore extra layers in pretrained model.
    tune_layers : str, default None
        Regexp pattern selecting layers for tuning (all others will be frozen).
    classes : int, default None
        Number of classes.
    in_channels : int, default None
        Number of input channels.
    do_hybridize : bool, default True
        Whether to hybridize model.
    initializer : Initializer
        Initializer.
    ctx : Context, default CPU
        MXNet context.
    Returns:
    -------
    HybridBlock
        Model.
    """
    kwargs = {"ctx": ctx,
              "pretrained": use_pretrained}
    if classes is not None:
        kwargs["classes"] = classes
    if in_channels is not None:
        kwargs["in_channels"] = in_channels
    if net_extra_kwargs is not None:
        kwargs.update(net_extra_kwargs)
    net = get_model(model_name, **kwargs)
    if pretrained_model_file_path:
        assert (os.path.isfile(pretrained_model_file_path))
        logging.info("Loading model: {}".format(pretrained_model_file_path))
        net.load_parameters(
            filename=pretrained_model_file_path,
            ctx=ctx,
            ignore_extra=load_ignore_extra)
    net.cast(dtype)
    if do_hybridize:
        net.hybridize(
            static_alloc=True,
            static_shape=True)
    if pretrained_model_file_path or use_pretrained:
        # Weights are already loaded; initialize only the parameters that were
        # left without data (e.g. a replaced classifier head).
        for param in net.collect_params().values():
            if param._data is not None:
                continue
            param.initialize(initializer, ctx=ctx)
    else:
        net.initialize(initializer, ctx=ctx)
    if (tune_layers is not None) and tune_layers:
        tune_layers_pattern = re.compile(tune_layers)
        # Freeze every parameter whose prefixed name does not match the pattern.
        for k, v in net._collect_params_with_prefix().items():
            if tune_layers_pattern.match(k):
                logging.info("Fine-tune parameter: {}".format(k))
            else:
                v.grad_req = "null"
        # Initialize any parameters still lacking data after freezing.
        for param in net.collect_params().values():
            if param._data is not None:
                continue
            param.initialize(initializer, ctx=ctx)
    return net
def calc_net_weight_count(net):
    """
    Calculate number of model trainable parameters.
    Parameters:
    ----------
    net : HybridBlock
        Model.
    Returns:
    -------
    int
        Number of parameters.
    """
    # Keep only differentiable parameters whose shape is already known.
    trainable = (
        p for p in net.collect_params().values()
        if (p.shape is not None) and p._differentiable)
    return sum(np.prod(p.shape) for p in trainable)
def validate(metric,
             net,
             val_data,
             batch_fn,
             data_source_needs_reset,
             dtype,
             ctx):
    """
    Core validation/testing routine.
    Parameters:
    ----------
    metric : EvalMetric
        Metric object instance.
    net : HybridBlock
        Model.
    val_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    Returns:
    -------
    EvalMetric
        Metric object instance.
    """
    if data_source_needs_reset:
        val_data.reset()
    metric.reset()
    for batch in val_data:
        inputs, labels = batch_fn(batch, ctx)
        predictions = []
        for chunk in inputs:
            predictions.append(net(chunk.astype(dtype, copy=False)))
        metric.update(labels, predictions)
    return metric
def validate_asr(metric,
                 net,
                 val_data,
                 batch_fn,
                 data_source_needs_reset,
                 dtype,
                 ctx):
    """
    Core validation/testing routine for ASR.
    Parameters:
    ----------
    metric : EvalMetric
        Metric object instance.
    net : HybridBlock
        Model.
    val_data : DataLoader or ImageRecordIter
        Data loader or ImRec-iterator.
    batch_fn : func
        Function for splitting data after extraction from data loader.
    data_source_needs_reset : bool
        Whether to reset data (if test_data is ImageRecordIter).
    dtype : str
        Base data type for tensors.
    ctx : Context
        MXNet context.
    Returns:
    -------
    EvalMetric
        Metric object instance.
    """
    if data_source_needs_reset:
        val_data.reset()
    metric.reset()
    for batch in val_data:
        # ASR batches carry a second input stream (e.g. sequence lengths).
        inputs, extra_inputs, labels = batch_fn(batch, ctx)
        predictions = []
        for sample, extra in zip(inputs, extra_inputs):
            predictions.append(net(sample.astype(dtype, copy=False), extra.astype(dtype, copy=False)))
        metric.update(labels, predictions)
    return metric
def report_accuracy(metric,
                    extended_log=False):
    """
    Make report string for composite metric.
    Parameters:
    ----------
    metric : EvalMetric
        Metric object instance.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    Returns:
    -------
    str
        Report string.
    """
    def format_entry(name, value):
        # Multi-valued metrics are rendered as slash-separated lists.
        if type(value) in (list, tuple):
            short_fmt = "/".join(["{:.4f}"] * len(value))
            if extended_log:
                raw_fmt = "/".join(["{}"] * len(value))
                template = "{}={} ({})".format("{}", short_fmt, raw_fmt)
                return template.format(name, *(value + value))
            return "{}={}".format("{}", short_fmt).format(name, *value)
        if extended_log:
            return "{name}={value:.4f} ({value})".format(name=name, value=value)
        return "{name}={value:.4f}".format(name=name, value=value)
    metric_info = metric.get()
    if isinstance(metric, mx.metric.CompositeEvalMetric):
        return ", ".join(format_entry(name, value) for name, value in zip(*metric_info))
    if isinstance(metric, mx.metric.EvalMetric):
        return format_entry(metric_info[0], metric_info[1])
    raise Exception("Wrong metric type: {}".format(type(metric)))
def get_metric(metric_name, metric_extra_kwargs):
    """
    Get metric by name.
    Parameters:
    ----------
    metric_name : str
        Metric name.
    metric_extra_kwargs : dict
        Metric extra parameters.
    Returns:
    -------
    EvalMetric
        Metric object instance.
    """
    # Dispatch table mapping a metric name onto its class.
    metric_classes = {
        "Top1Error": Top1Error,
        "TopKError": TopKError,
        "PixelAccuracyMetric": PixelAccuracyMetric,
        "MeanIoUMetric": MeanIoUMetric,
        "CocoDetMApMetric": CocoDetMApMetric,
        "VOC07MApMetric": VOC07MApMetric,
        "WiderfaceDetMetric": WiderfaceDetMetric,
        "CocoHpeOksApMetric": CocoHpeOksApMetric,
        "WER": WER,
    }
    if metric_name not in metric_classes:
        raise Exception("Wrong metric name: {}".format(metric_name))
    return metric_classes[metric_name](**metric_extra_kwargs)
def get_composite_metric(metric_names, metric_extra_kwargs):
    """
    Get composite metric by list of metric names.
    Parameters:
    ----------
    metric_names : list of str
        Metric name list.
    metric_extra_kwargs : list of dict
        Metric extra parameters list.
    Returns:
    -------
    CompositeEvalMetric
        Metric object instance.
    """
    # A single name yields the bare metric instead of a composite wrapper.
    if len(metric_names) == 1:
        return get_metric(metric_names[0], metric_extra_kwargs[0])
    composite = mx.metric.CompositeEvalMetric()
    for name, extra_kwargs in zip(metric_names, metric_extra_kwargs):
        composite.add(get_metric(name, extra_kwargs))
    return composite
def get_metric_name(metric, index):
    """
    Get metric name by index in the composite metric.
    Parameters:
    ----------
    metric : CompositeEvalMetric or EvalMetric
        Metric object instance.
    index : int
        Index.
    Returns:
    -------
    str
        Metric name.
    """
    # Guard clauses instead of an if/elif chain.
    if isinstance(metric, mx.metric.CompositeEvalMetric):
        return metric.metrics[index].name
    if isinstance(metric, mx.metric.EvalMetric):
        # A plain metric has a single name, so only index 0 is meaningful.
        assert (index == 0)
        return metric.name
    raise Exception("Wrong metric type: {}".format(type(metric)))
def get_loss(loss_name, loss_extra_kwargs):
    """
    Get loss by name.
    Parameters:
    ----------
    loss_name : str
        Loss name.
    loss_extra_kwargs : dict
        Loss extra parameters.
    Returns:
    -------
    Loss
        Loss object instance.
    """
    # Dispatch table mapping a loss name onto its class.
    loss_classes = {
        "SoftmaxCrossEntropy": SoftmaxCrossEntropyLoss,
        "SegSoftmaxCrossEntropy": SegSoftmaxCrossEntropyLoss,
        "MixSoftmaxCrossEntropy": MixSoftmaxCrossEntropyLoss,
    }
    if loss_name not in loss_classes:
        raise Exception("Wrong loss name: {}".format(loss_name))
    return loss_classes[loss_name](**loss_extra_kwargs)
| 12,230 | 27.444186 | 116 | py |
imgclsmob | imgclsmob-master/gluon/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/gluon/distillation.py | """
DNN distillation routines.
"""
__all__ = ['MealDiscriminator', 'MealAdvLoss']
from mxnet.gluon import nn, HybridBlock
from .gluoncv2.models.common import conv1x1, conv1x1_block
from mxnet.gluon.loss import SigmoidBinaryCrossEntropyLoss
class MealDiscriminator(HybridBlock):
    """
    MEALv2 discriminator.
    A small fully-convolutional scorer over a class-score vector: the input of
    shape (batch, classes) is reshaped to a 1x1 spatial map, passed through a
    stack of 1x1 conv blocks, and reduced to one scalar score per sample.
    Parameters:
    ----------
    classes : int, default 1000
        Number of classification classes.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 classes=1000,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(MealDiscriminator, self).__init__(**kwargs)
        in_channels = classes
        # Channel widths of the successive 1x1 conv blocks.
        channels = [200, 40, 8]
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            for out_channels in channels:
                self.features.add(conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off))
                in_channels = out_channels
            # Head: 1x1 conv down to a single channel, then flatten to (batch, 1).
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1(
                in_channels=in_channels,
                out_channels=1,
                use_bias=True))
            self.output.add(nn.Flatten())
    def hybrid_forward(self, F, x):
        """Forward pass: (batch, classes) -> (batch,) scores."""
        # Add two trailing axes so 1x1 convolutions can be applied.
        x = x.expand_dims(-1).expand_dims(-1)
        x = self.features(x)
        x = self.output(x)
        # Collapse (batch, 1) to (batch,).
        x = x.squeeze(1)
        return x
class MealAdvLoss(SigmoidBinaryCrossEntropyLoss):
    """
    MEALv2 adversarial loss.
    Sigmoid BCE scoring `pred` against an all-zeros target and `label` against
    an all-ones target, returning the sum of the two terms.
    Parameters:
    ----------
    from_sigmoid : bool, default is `False`
        Whether the input is from the output of sigmoid. Set this to false will make
        the loss calculate sigmoid and BCE together, which is more numerically
        stable through log-sum-exp trick.
    weight : float or None
        Global scalar weight for loss.
    batch_axis : int, default 0
        The axis that represents mini-batch.
    """
    def __init__(self,
                 **kwargs):
        super(MealAdvLoss, self).__init__(**kwargs)
    def hybrid_forward(self, F, pred, label, sample_weight=None, pos_weight=None):
        # `pred` is driven towards the zero target...
        z_pred = F.zeros_like(pred)
        loss_pred = super(MealAdvLoss, self).hybrid_forward(F, pred, z_pred)
        # ...while `label` is driven towards the one target.
        z_label = F.ones_like(label)
        loss_label = super(MealAdvLoss, self).hybrid_forward(F, label, z_label)
        return loss_pred + loss_label
def _test():
    """Smoke-test MealDiscriminator and MealAdvLoss on random data."""
    import numpy as np
    import mxnet as mx
    model = MealDiscriminator
    ctx = mx.cpu()
    net = model()
    net.initialize(ctx=ctx)
    # net.hybridize()
    weight_count = sum(
        np.prod(p.shape)
        for p in net.collect_params().values()
        if (p.shape is not None) and p._differentiable)
    print("m={}, {}".format(model.__name__, weight_count))
    # assert (model != MealDiscriminator or weight_count == 208834)
    batch = 14
    classes = 1000
    x = mx.nd.random.normal(shape=(batch, classes), ctx=ctx)
    y = net(x)
    assert (y.shape == (batch,))
    loss = MealAdvLoss()
    z = loss(y, 1 - y)
    print(z)
if __name__ == "__main__":
    _test()
| 3,585 | 28.393443 | 98 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/__init__.py | 0 | 0 | 0 | py | |
imgclsmob | imgclsmob-master/gluon/gluoncv2/model_provider.py | from .models.alexnet import *
from .models.zfnet import *
from .models.vgg import *
from .models.bninception import *
from .models.resnet import *
from .models.preresnet import *
from .models.resnext import *
from .models.seresnet import *
from .models.sepreresnet import *
from .models.seresnext import *
from .models.senet import *
from .models.resnesta import *
from .models.ibnresnet import *
from .models.ibnbresnet import *
from .models.ibnresnext import *
from .models.ibndensenet import *
from .models.airnet import *
from .models.airnext import *
from .models.bamresnet import *
from .models.cbamresnet import *
from .models.resattnet import *
from .models.sknet import *
from .models.scnet import *
from .models.regnet import *
from .models.diaresnet import *
from .models.diapreresnet import *
from .models.pyramidnet import *
from .models.diracnetv2 import *
from .models.sharesnet import *
from .models.crunet import *
from .models.crunetb import *
from .models.densenet import *
from .models.condensenet import *
from .models.sparsenet import *
from .models.peleenet import *
from .models.wrn import *
from .models.drn import *
from .models.dpn import *
from .models.darknet import *
from .models.darknet53 import *
from .models.channelnet import *
from .models.isqrtcovresnet import *
from .models.irevnet import *
from .models.bagnet import *
from .models.dla import *
from .models.msdnet import *
from .models.fishnet import *
from .models.espnetv2 import *
from .models.dicenet import *
from .models.hrnet import *
from .models.vovnet import *
from .models.selecsls import *
from .models.hardnet import *
from .models.xdensenet import *
from .models.squeezenet import *
from .models.squeezenext import *
from .models.shufflenet import *
from .models.shufflenetv2 import *
from .models.shufflenetv2b import *
from .models.menet import *
from .models.mobilenet import *
from .models.mobilenetb import *
from .models.fdmobilenet import *
from .models.mobilenetv2 import *
from .models.mobilenetv3 import *
from .models.igcv3 import *
from .models.ghostnet import *
from .models.mnasnet import *
from .models.darts import *
from .models.proxylessnas import *
from .models.fbnet import *
from .models.xception import *
from .models.inceptionv3 import *
from .models.inceptionv4 import *
from .models.inceptionresnetv1 import *
from .models.inceptionresnetv2 import *
from .models.polynet import *
from .models.nasnet import *
from .models.pnasnet import *
from .models.spnasnet import *
from .models.efficientnet import *
from .models.efficientnetedge import *
from .models.mixnet import *
from .models.nin_cifar import *
from .models.resnet_cifar import *
from .models.preresnet_cifar import *
from .models.resnext_cifar import *
from .models.seresnet_cifar import *
from .models.sepreresnet_cifar import *
from .models.pyramidnet_cifar import *
from .models.densenet_cifar import *
from .models.xdensenet_cifar import *
from .models.wrn_cifar import *
from .models.wrn1bit_cifar import *
from .models.ror_cifar import *
from .models.rir_cifar import *
from .models.resdropresnet_cifar import *
from .models.shakeshakeresnet_cifar import *
from .models.shakedropresnet_cifar import *
from .models.fractalnet_cifar import *
from .models.diaresnet_cifar import *
from .models.diapreresnet_cifar import *
from .models.octresnet import *
from .models.octresnet_cifar import *
from .models.res2net import *
from .models.resneta import *
from .models.resnetd import *
from .models.fastseresnet import *
from .models.resnet_cub import *
from .models.seresnet_cub import *
from .models.mobilenet_cub import *
from .models.proxylessnas_cub import *
from .models.ntsnet_cub import *
from .models.fcn8sd import *
from .models.pspnet import *
from .models.deeplabv3 import *
from .models.icnet import *
from .models.fastscnn import *
from .models.cgnet import *
from .models.dabnet import *
from .models.sinet import *
from .models.bisenet import *
from .models.danet import *
from .models.fpenet import *
from .models.lednet import *
from .models.superpointnet import *
from .models.alphapose_coco import *
from .models.simplepose_coco import *
from .models.simpleposemobile_coco import *
from .models.lwopenpose_cmupan import *
from .models.ibppose_coco import *
from .models.centernet import *
from .models.lffd import *
from .models.visemenet import *
from .models.voca import *
from .models.nvpattexp import *
from .models.jasper import *
from .models.jasperdr import *
from .models.quartznet import *
# from .models.others.oth_simple_pose_resnet import *
# from .models.others.oth_mobile_pose import *
# from .models.others.oth_alpha_pose import *
# from .models.others.oth_icnet import *
# from .models.others.oth_centernet import *
# from .models.others.oth_resnest import *
# from .models.others.oth_danet import *
# from .models.others.oth_fastscnn import *
from .models.regnetv import *
# Restrict this module's public API to the `get_model` factory only: the
# wildcard imports above pull in hundreds of model constructors, but they are
# meant to be reached via the `_models` registry / `get_model(name)`, not
# re-exported by `from <this module> import *`.
__all__ = ['get_model']
_models = {
'alexnet': alexnet,
'alexnetb': alexnetb,
'zfnet': zfnet,
'zfnetb': zfnetb,
'vgg11': vgg11,
'vgg13': vgg13,
'vgg16': vgg16,
'vgg19': vgg19,
'bn_vgg11': bn_vgg11,
'bn_vgg13': bn_vgg13,
'bn_vgg16': bn_vgg16,
'bn_vgg19': bn_vgg19,
'bn_vgg11b': bn_vgg11b,
'bn_vgg13b': bn_vgg13b,
'bn_vgg16b': bn_vgg16b,
'bn_vgg19b': bn_vgg19b,
'bninception': bninception,
'resnet10': resnet10,
'resnet12': resnet12,
'resnet14': resnet14,
'resnetbc14b': resnetbc14b,
'resnet16': resnet16,
'resnet18_wd4': resnet18_wd4,
'resnet18_wd2': resnet18_wd2,
'resnet18_w3d4': resnet18_w3d4,
'resnet18': resnet18,
'resnet26': resnet26,
'resnetbc26b': resnetbc26b,
'resnet34': resnet34,
'resnetbc38b': resnetbc38b,
'resnet50': resnet50,
'resnet50b': resnet50b,
'resnet101': resnet101,
'resnet101b': resnet101b,
'resnet152': resnet152,
'resnet152b': resnet152b,
'resnet200': resnet200,
'resnet200b': resnet200b,
'preresnet10': preresnet10,
'preresnet12': preresnet12,
'preresnet14': preresnet14,
'preresnetbc14b': preresnetbc14b,
'preresnet16': preresnet16,
'preresnet18_wd4': preresnet18_wd4,
'preresnet18_wd2': preresnet18_wd2,
'preresnet18_w3d4': preresnet18_w3d4,
'preresnet18': preresnet18,
'preresnet26': preresnet26,
'preresnetbc26b': preresnetbc26b,
'preresnet34': preresnet34,
'preresnetbc38b': preresnetbc38b,
'preresnet50': preresnet50,
'preresnet50b': preresnet50b,
'preresnet101': preresnet101,
'preresnet101b': preresnet101b,
'preresnet152': preresnet152,
'preresnet152b': preresnet152b,
'preresnet200': preresnet200,
'preresnet200b': preresnet200b,
'preresnet269b': preresnet269b,
'resnext14_16x4d': resnext14_16x4d,
'resnext14_32x2d': resnext14_32x2d,
'resnext14_32x4d': resnext14_32x4d,
'resnext26_16x4d': resnext26_16x4d,
'resnext26_32x2d': resnext26_32x2d,
'resnext26_32x4d': resnext26_32x4d,
'resnext38_32x4d': resnext38_32x4d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'seresnet10': seresnet10,
'seresnet12': seresnet12,
'seresnet14': seresnet14,
'seresnet16': seresnet16,
'seresnet18': seresnet18,
'seresnet26': seresnet26,
'seresnetbc26b': seresnetbc26b,
'seresnet34': seresnet34,
'seresnetbc38b': seresnetbc38b,
'seresnet50': seresnet50,
'seresnet50b': seresnet50b,
'seresnet101': seresnet101,
'seresnet101b': seresnet101b,
'seresnet152': seresnet152,
'seresnet152b': seresnet152b,
'seresnet200': seresnet200,
'seresnet200b': seresnet200b,
'sepreresnet10': sepreresnet10,
'sepreresnet12': sepreresnet12,
'sepreresnet14': sepreresnet14,
'sepreresnet16': sepreresnet16,
'sepreresnet18': sepreresnet18,
'sepreresnet26': sepreresnet26,
'sepreresnetbc26b': sepreresnetbc26b,
'sepreresnet34': sepreresnet34,
'sepreresnetbc38b': sepreresnetbc38b,
'sepreresnet50': sepreresnet50,
'sepreresnet50b': sepreresnet50b,
'sepreresnet101': sepreresnet101,
'sepreresnet101b': sepreresnet101b,
'sepreresnet152': sepreresnet152,
'sepreresnet152b': sepreresnet152b,
'sepreresnet200': sepreresnet200,
'sepreresnet200b': sepreresnet200b,
'seresnext50_32x4d': seresnext50_32x4d,
'seresnext101_32x4d': seresnext101_32x4d,
'seresnext101_64x4d': seresnext101_64x4d,
'senet16': senet16,
'senet28': senet28,
'senet40': senet40,
'senet52': senet52,
'senet103': senet103,
'senet154': senet154,
'resnestabc14': resnestabc14,
'resnesta18': resnesta18,
'resnestabc26': resnestabc26,
'resnestabc38': resnestabc38,
'resnesta50': resnesta50,
'resnesta101': resnesta101,
'resnesta152': resnesta152,
'resnesta200': resnesta200,
'resnesta269': resnesta269,
'ibn_resnet50': ibn_resnet50,
'ibn_resnet101': ibn_resnet101,
'ibn_resnet152': ibn_resnet152,
'ibnb_resnet50': ibnb_resnet50,
'ibnb_resnet101': ibnb_resnet101,
'ibnb_resnet152': ibnb_resnet152,
'ibn_resnext50_32x4d': ibn_resnext50_32x4d,
'ibn_resnext101_32x4d': ibn_resnext101_32x4d,
'ibn_resnext101_64x4d': ibn_resnext101_64x4d,
'ibn_densenet121': ibn_densenet121,
'ibn_densenet161': ibn_densenet161,
'ibn_densenet169': ibn_densenet169,
'ibn_densenet201': ibn_densenet201,
'airnet50_1x64d_r2': airnet50_1x64d_r2,
'airnet50_1x64d_r16': airnet50_1x64d_r16,
'airnet101_1x64d_r2': airnet101_1x64d_r2,
'airnext50_32x4d_r2': airnext50_32x4d_r2,
'airnext101_32x4d_r2': airnext101_32x4d_r2,
'airnext101_32x4d_r16': airnext101_32x4d_r16,
'bam_resnet18': bam_resnet18,
'bam_resnet34': bam_resnet34,
'bam_resnet50': bam_resnet50,
'bam_resnet101': bam_resnet101,
'bam_resnet152': bam_resnet152,
'cbam_resnet18': cbam_resnet18,
'cbam_resnet34': cbam_resnet34,
'cbam_resnet50': cbam_resnet50,
'cbam_resnet101': cbam_resnet101,
'cbam_resnet152': cbam_resnet152,
'resattnet56': resattnet56,
'resattnet92': resattnet92,
'resattnet128': resattnet128,
'resattnet164': resattnet164,
'resattnet200': resattnet200,
'resattnet236': resattnet236,
'resattnet452': resattnet452,
'sknet50': sknet50,
'sknet101': sknet101,
'sknet152': sknet152,
'scnet50': scnet50,
'scnet101': scnet101,
'scneta50': scneta50,
'scneta101': scneta101,
'regnetx002': regnetx002,
'regnetx004': regnetx004,
'regnetx006': regnetx006,
'regnetx008': regnetx008,
'regnetx016': regnetx016,
'regnetx032': regnetx032,
'regnetx040': regnetx040,
'regnetx064': regnetx064,
'regnetx080': regnetx080,
'regnetx120': regnetx120,
'regnetx160': regnetx160,
'regnetx320': regnetx320,
'regnety002': regnety002,
'regnety004': regnety004,
'regnety006': regnety006,
'regnety008': regnety008,
'regnety016': regnety016,
'regnety032': regnety032,
'regnety040': regnety040,
'regnety064': regnety064,
'regnety080': regnety080,
'regnety120': regnety120,
'regnety160': regnety160,
'regnety320': regnety320,
'regnetz002': regnetz002,
'regnetw002': regnetw002,
'diaresnet10': diaresnet10,
'diaresnet12': diaresnet12,
'diaresnet14': diaresnet14,
'diaresnetbc14b': diaresnetbc14b,
'diaresnet16': diaresnet16,
'diaresnet18': diaresnet18,
'diaresnet26': diaresnet26,
'diaresnetbc26b': diaresnetbc26b,
'diaresnet34': diaresnet34,
'diaresnetbc38b': diaresnetbc38b,
'diaresnet50': diaresnet50,
'diaresnet50b': diaresnet50b,
'diaresnet101': diaresnet101,
'diaresnet101b': diaresnet101b,
'diaresnet152': diaresnet152,
'diaresnet152b': diaresnet152b,
'diaresnet200': diaresnet200,
'diaresnet200b': diaresnet200b,
'diapreresnet10': diapreresnet10,
'diapreresnet12': diapreresnet12,
'diapreresnet14': diapreresnet14,
'diapreresnetbc14b': diapreresnetbc14b,
'diapreresnet16': diapreresnet16,
'diapreresnet18': diapreresnet18,
'diapreresnet26': diapreresnet26,
'diapreresnetbc26b': diapreresnetbc26b,
'diapreresnet34': diapreresnet34,
'diapreresnetbc38b': diapreresnetbc38b,
'diapreresnet50': diapreresnet50,
'diapreresnet50b': diapreresnet50b,
'diapreresnet101': diapreresnet101,
'diapreresnet101b': diapreresnet101b,
'diapreresnet152': diapreresnet152,
'diapreresnet152b': diapreresnet152b,
'diapreresnet200': diapreresnet200,
'diapreresnet200b': diapreresnet200b,
'diapreresnet269b': diapreresnet269b,
'pyramidnet101_a360': pyramidnet101_a360,
'diracnet18v2': diracnet18v2,
'diracnet34v2': diracnet34v2,
'sharesnet18': sharesnet18,
'sharesnet34': sharesnet34,
'sharesnet50': sharesnet50,
'sharesnet50b': sharesnet50b,
'sharesnet101': sharesnet101,
'sharesnet101b': sharesnet101b,
'sharesnet152': sharesnet152,
'sharesnet152b': sharesnet152b,
'crunet56': crunet56,
'crunet116': crunet116,
'crunet56b': crunet56b,
'crunet116b': crunet116b,
'densenet121': densenet121,
'densenet161': densenet161,
'densenet169': densenet169,
'densenet201': densenet201,
'condensenet74_c4_g4': condensenet74_c4_g4,
'condensenet74_c8_g8': condensenet74_c8_g8,
'sparsenet121': sparsenet121,
'sparsenet161': sparsenet161,
'sparsenet169': sparsenet169,
'sparsenet201': sparsenet201,
'sparsenet264': sparsenet264,
'peleenet': peleenet,
'wrn50_2': wrn50_2,
'drnc26': drnc26,
'drnc42': drnc42,
'drnc58': drnc58,
'drnd22': drnd22,
'drnd38': drnd38,
'drnd54': drnd54,
'drnd105': drnd105,
'dpn68': dpn68,
'dpn68b': dpn68b,
'dpn98': dpn98,
'dpn107': dpn107,
'dpn131': dpn131,
'darknet_ref': darknet_ref,
'darknet_tiny': darknet_tiny,
'darknet19': darknet19,
'darknet53': darknet53,
'channelnet': channelnet,
'irevnet301': irevnet301,
'bagnet9': bagnet9,
'bagnet17': bagnet17,
'bagnet33': bagnet33,
'dla34': dla34,
'dla46c': dla46c,
'dla46xc': dla46xc,
'dla60': dla60,
'dla60x': dla60x,
'dla60xc': dla60xc,
'dla102': dla102,
'dla102x': dla102x,
'dla102x2': dla102x2,
'dla169': dla169,
'msdnet22': msdnet22,
'fishnet99': fishnet99,
'fishnet150': fishnet150,
'espnetv2_wd2': espnetv2_wd2,
'espnetv2_w1': espnetv2_w1,
'espnetv2_w5d4': espnetv2_w5d4,
'espnetv2_w3d2': espnetv2_w3d2,
'espnetv2_w2': espnetv2_w2,
'dicenet_wd5': dicenet_wd5,
'dicenet_wd2': dicenet_wd2,
'dicenet_w3d4': dicenet_w3d4,
'dicenet_w1': dicenet_w1,
'dicenet_w5d4': dicenet_w5d4,
'dicenet_w3d2': dicenet_w3d2,
'dicenet_w7d8': dicenet_w7d8,
'dicenet_w2': dicenet_w2,
'hrnet_w18_small_v1': hrnet_w18_small_v1,
'hrnet_w18_small_v2': hrnet_w18_small_v2,
'hrnetv2_w18': hrnetv2_w18,
'hrnetv2_w30': hrnetv2_w30,
'hrnetv2_w32': hrnetv2_w32,
'hrnetv2_w40': hrnetv2_w40,
'hrnetv2_w44': hrnetv2_w44,
'hrnetv2_w48': hrnetv2_w48,
'hrnetv2_w64': hrnetv2_w64,
'vovnet27s': vovnet27s,
'vovnet39': vovnet39,
'vovnet57': vovnet57,
'selecsls42': selecsls42,
'selecsls42b': selecsls42b,
'selecsls60': selecsls60,
'selecsls60b': selecsls60b,
'selecsls84': selecsls84,
'hardnet39ds': hardnet39ds,
'hardnet68ds': hardnet68ds,
'hardnet68': hardnet68,
'hardnet85': hardnet85,
'xdensenet121_2': xdensenet121_2,
'xdensenet161_2': xdensenet161_2,
'xdensenet169_2': xdensenet169_2,
'xdensenet201_2': xdensenet201_2,
'squeezenet_v1_0': squeezenet_v1_0,
'squeezenet_v1_1': squeezenet_v1_1,
'squeezeresnet_v1_0': squeezeresnet_v1_0,
'squeezeresnet_v1_1': squeezeresnet_v1_1,
'sqnxt23_w1': sqnxt23_w1,
'sqnxt23_w3d2': sqnxt23_w3d2,
'sqnxt23_w2': sqnxt23_w2,
'sqnxt23v5_w1': sqnxt23v5_w1,
'sqnxt23v5_w3d2': sqnxt23v5_w3d2,
'sqnxt23v5_w2': sqnxt23v5_w2,
'shufflenet_g1_w1': shufflenet_g1_w1,
'shufflenet_g2_w1': shufflenet_g2_w1,
'shufflenet_g3_w1': shufflenet_g3_w1,
'shufflenet_g4_w1': shufflenet_g4_w1,
'shufflenet_g8_w1': shufflenet_g8_w1,
'shufflenet_g1_w3d4': shufflenet_g1_w3d4,
'shufflenet_g3_w3d4': shufflenet_g3_w3d4,
'shufflenet_g1_wd2': shufflenet_g1_wd2,
'shufflenet_g3_wd2': shufflenet_g3_wd2,
'shufflenet_g1_wd4': shufflenet_g1_wd4,
'shufflenet_g3_wd4': shufflenet_g3_wd4,
'shufflenetv2_wd2': shufflenetv2_wd2,
'shufflenetv2_w1': shufflenetv2_w1,
'shufflenetv2_w3d2': shufflenetv2_w3d2,
'shufflenetv2_w2': shufflenetv2_w2,
'shufflenetv2b_wd2': shufflenetv2b_wd2,
'shufflenetv2b_w1': shufflenetv2b_w1,
'shufflenetv2b_w3d2': shufflenetv2b_w3d2,
'shufflenetv2b_w2': shufflenetv2b_w2,
'menet108_8x1_g3': menet108_8x1_g3,
'menet128_8x1_g4': menet128_8x1_g4,
'menet160_8x1_g8': menet160_8x1_g8,
'menet228_12x1_g3': menet228_12x1_g3,
'menet256_12x1_g4': menet256_12x1_g4,
'menet348_12x1_g3': menet348_12x1_g3,
'menet352_12x1_g8': menet352_12x1_g8,
'menet456_24x1_g3': menet456_24x1_g3,
'mobilenet_w1': mobilenet_w1,
'mobilenet_w3d4': mobilenet_w3d4,
'mobilenet_wd2': mobilenet_wd2,
'mobilenet_wd4': mobilenet_wd4,
'mobilenetb_w1': mobilenetb_w1,
'mobilenetb_w3d4': mobilenetb_w3d4,
'mobilenetb_wd2': mobilenetb_wd2,
'mobilenetb_wd4': mobilenetb_wd4,
'fdmobilenet_w1': fdmobilenet_w1,
'fdmobilenet_w3d4': fdmobilenet_w3d4,
'fdmobilenet_wd2': fdmobilenet_wd2,
'fdmobilenet_wd4': fdmobilenet_wd4,
'mobilenetv2_w1': mobilenetv2_w1,
'mobilenetv2_w3d4': mobilenetv2_w3d4,
'mobilenetv2_wd2': mobilenetv2_wd2,
'mobilenetv2_wd4': mobilenetv2_wd4,
'mobilenetv2b_w1': mobilenetv2b_w1,
'mobilenetv2b_w3d4': mobilenetv2b_w3d4,
'mobilenetv2b_wd2': mobilenetv2b_wd2,
'mobilenetv2b_wd4': mobilenetv2b_wd4,
'mobilenetv3_small_w7d20': mobilenetv3_small_w7d20,
'mobilenetv3_small_wd2': mobilenetv3_small_wd2,
'mobilenetv3_small_w3d4': mobilenetv3_small_w3d4,
'mobilenetv3_small_w1': mobilenetv3_small_w1,
'mobilenetv3_small_w5d4': mobilenetv3_small_w5d4,
'mobilenetv3_large_w7d20': mobilenetv3_large_w7d20,
'mobilenetv3_large_wd2': mobilenetv3_large_wd2,
'mobilenetv3_large_w3d4': mobilenetv3_large_w3d4,
'mobilenetv3_large_w1': mobilenetv3_large_w1,
'mobilenetv3_large_w5d4': mobilenetv3_large_w5d4,
'igcv3_w1': igcv3_w1,
'igcv3_w3d4': igcv3_w3d4,
'igcv3_wd2': igcv3_wd2,
'igcv3_wd4': igcv3_wd4,
'ghostnet': ghostnet,
'mnasnet_b1': mnasnet_b1,
'mnasnet_a1': mnasnet_a1,
'mnasnet_small': mnasnet_small,
'darts': darts,
'proxylessnas_cpu': proxylessnas_cpu,
'proxylessnas_gpu': proxylessnas_gpu,
'proxylessnas_mobile': proxylessnas_mobile,
'proxylessnas_mobile14': proxylessnas_mobile14,
'fbnet_cb': fbnet_cb,
'xception': xception,
'inceptionv3': inceptionv3,
'inceptionv3_gl': inceptionv3_gl,
'inceptionv4': inceptionv4,
'inceptionresnetv1': inceptionresnetv1,
'inceptionresnetv2': inceptionresnetv2,
'polynet': polynet,
'nasnet_4a1056': nasnet_4a1056,
'nasnet_6a4032': nasnet_6a4032,
'pnasnet5large': pnasnet5large,
'spnasnet': spnasnet,
'efficientnet_b0': efficientnet_b0,
'efficientnet_b1': efficientnet_b1,
'efficientnet_b2': efficientnet_b2,
'efficientnet_b3': efficientnet_b3,
'efficientnet_b4': efficientnet_b4,
'efficientnet_b5': efficientnet_b5,
'efficientnet_b6': efficientnet_b6,
'efficientnet_b7': efficientnet_b7,
'efficientnet_b8': efficientnet_b8,
'efficientnet_b0b': efficientnet_b0b,
'efficientnet_b1b': efficientnet_b1b,
'efficientnet_b2b': efficientnet_b2b,
'efficientnet_b3b': efficientnet_b3b,
'efficientnet_b4b': efficientnet_b4b,
'efficientnet_b5b': efficientnet_b5b,
'efficientnet_b6b': efficientnet_b6b,
'efficientnet_b7b': efficientnet_b7b,
'efficientnet_b0c': efficientnet_b0c,
'efficientnet_b1c': efficientnet_b1c,
'efficientnet_b2c': efficientnet_b2c,
'efficientnet_b3c': efficientnet_b3c,
'efficientnet_b4c': efficientnet_b4c,
'efficientnet_b5c': efficientnet_b5c,
'efficientnet_b6c': efficientnet_b6c,
'efficientnet_b7c': efficientnet_b7c,
'efficientnet_b8c': efficientnet_b8c,
'efficientnet_edge_small_b': efficientnet_edge_small_b,
'efficientnet_edge_medium_b': efficientnet_edge_medium_b,
'efficientnet_edge_large_b': efficientnet_edge_large_b,
'mixnet_s': mixnet_s,
'mixnet_m': mixnet_m,
'mixnet_l': mixnet_l,
'nin_cifar10': nin_cifar10,
'nin_cifar100': nin_cifar100,
'nin_svhn': nin_svhn,
'resnet20_cifar10': resnet20_cifar10,
'resnet20_cifar100': resnet20_cifar100,
'resnet20_svhn': resnet20_svhn,
'resnet56_cifar10': resnet56_cifar10,
'resnet56_cifar100': resnet56_cifar100,
'resnet56_svhn': resnet56_svhn,
'resnet110_cifar10': resnet110_cifar10,
'resnet110_cifar100': resnet110_cifar100,
'resnet110_svhn': resnet110_svhn,
'resnet164bn_cifar10': resnet164bn_cifar10,
'resnet164bn_cifar100': resnet164bn_cifar100,
'resnet164bn_svhn': resnet164bn_svhn,
'resnet272bn_cifar10': resnet272bn_cifar10,
'resnet272bn_cifar100': resnet272bn_cifar100,
'resnet272bn_svhn': resnet272bn_svhn,
'resnet542bn_cifar10': resnet542bn_cifar10,
'resnet542bn_cifar100': resnet542bn_cifar100,
'resnet542bn_svhn': resnet542bn_svhn,
'resnet1001_cifar10': resnet1001_cifar10,
'resnet1001_cifar100': resnet1001_cifar100,
'resnet1001_svhn': resnet1001_svhn,
'resnet1202_cifar10': resnet1202_cifar10,
'resnet1202_cifar100': resnet1202_cifar100,
'resnet1202_svhn': resnet1202_svhn,
'preresnet20_cifar10': preresnet20_cifar10,
'preresnet20_cifar100': preresnet20_cifar100,
'preresnet20_svhn': preresnet20_svhn,
'preresnet56_cifar10': preresnet56_cifar10,
'preresnet56_cifar100': preresnet56_cifar100,
'preresnet56_svhn': preresnet56_svhn,
'preresnet110_cifar10': preresnet110_cifar10,
'preresnet110_cifar100': preresnet110_cifar100,
'preresnet110_svhn': preresnet110_svhn,
'preresnet164bn_cifar10': preresnet164bn_cifar10,
'preresnet164bn_cifar100': preresnet164bn_cifar100,
'preresnet164bn_svhn': preresnet164bn_svhn,
'preresnet272bn_cifar10': preresnet272bn_cifar10,
'preresnet272bn_cifar100': preresnet272bn_cifar100,
'preresnet272bn_svhn': preresnet272bn_svhn,
'preresnet542bn_cifar10': preresnet542bn_cifar10,
'preresnet542bn_cifar100': preresnet542bn_cifar100,
'preresnet542bn_svhn': preresnet542bn_svhn,
'preresnet1001_cifar10': preresnet1001_cifar10,
'preresnet1001_cifar100': preresnet1001_cifar100,
'preresnet1001_svhn': preresnet1001_svhn,
'preresnet1202_cifar10': preresnet1202_cifar10,
'preresnet1202_cifar100': preresnet1202_cifar100,
'preresnet1202_svhn': preresnet1202_svhn,
'resnext20_1x64d_cifar10': resnext20_1x64d_cifar10,
'resnext20_1x64d_cifar100': resnext20_1x64d_cifar100,
'resnext20_1x64d_svhn': resnext20_1x64d_svhn,
'resnext20_2x32d_cifar10': resnext20_2x32d_cifar10,
'resnext20_2x32d_cifar100': resnext20_2x32d_cifar100,
'resnext20_2x32d_svhn': resnext20_2x32d_svhn,
'resnext20_2x64d_cifar10': resnext20_2x64d_cifar10,
'resnext20_2x64d_cifar100': resnext20_2x64d_cifar100,
'resnext20_2x64d_svhn': resnext20_2x64d_svhn,
'resnext20_4x16d_cifar10': resnext20_4x16d_cifar10,
'resnext20_4x16d_cifar100': resnext20_4x16d_cifar100,
'resnext20_4x16d_svhn': resnext20_4x16d_svhn,
'resnext20_4x32d_cifar10': resnext20_4x32d_cifar10,
'resnext20_4x32d_cifar100': resnext20_4x32d_cifar100,
'resnext20_4x32d_svhn': resnext20_4x32d_svhn,
'resnext20_8x8d_cifar10': resnext20_8x8d_cifar10,
'resnext20_8x8d_cifar100': resnext20_8x8d_cifar100,
'resnext20_8x8d_svhn': resnext20_8x8d_svhn,
'resnext20_8x16d_cifar10': resnext20_8x16d_cifar10,
'resnext20_8x16d_cifar100': resnext20_8x16d_cifar100,
'resnext20_8x16d_svhn': resnext20_8x16d_svhn,
'resnext20_16x4d_cifar10': resnext20_16x4d_cifar10,
'resnext20_16x4d_cifar100': resnext20_16x4d_cifar100,
'resnext20_16x4d_svhn': resnext20_16x4d_svhn,
'resnext20_16x8d_cifar10': resnext20_16x8d_cifar10,
'resnext20_16x8d_cifar100': resnext20_16x8d_cifar100,
'resnext20_16x8d_svhn': resnext20_16x8d_svhn,
'resnext20_32x2d_cifar10': resnext20_32x2d_cifar10,
'resnext20_32x2d_cifar100': resnext20_32x2d_cifar100,
'resnext20_32x2d_svhn': resnext20_32x2d_svhn,
'resnext20_32x4d_cifar10': resnext20_32x4d_cifar10,
'resnext20_32x4d_cifar100': resnext20_32x4d_cifar100,
'resnext20_32x4d_svhn': resnext20_32x4d_svhn,
'resnext20_64x1d_cifar10': resnext20_64x1d_cifar10,
'resnext20_64x1d_cifar100': resnext20_64x1d_cifar100,
'resnext20_64x1d_svhn': resnext20_64x1d_svhn,
'resnext20_64x2d_cifar10': resnext20_64x2d_cifar10,
'resnext20_64x2d_cifar100': resnext20_64x2d_cifar100,
'resnext20_64x2d_svhn': resnext20_64x2d_svhn,
'resnext29_32x4d_cifar10': resnext29_32x4d_cifar10,
'resnext29_32x4d_cifar100': resnext29_32x4d_cifar100,
'resnext29_32x4d_svhn': resnext29_32x4d_svhn,
'resnext29_16x64d_cifar10': resnext29_16x64d_cifar10,
'resnext29_16x64d_cifar100': resnext29_16x64d_cifar100,
'resnext29_16x64d_svhn': resnext29_16x64d_svhn,
'resnext56_1x64d_cifar10': resnext56_1x64d_cifar10,
'resnext56_1x64d_cifar100': resnext56_1x64d_cifar100,
'resnext56_1x64d_svhn': resnext56_1x64d_svhn,
'resnext56_2x32d_cifar10': resnext56_2x32d_cifar10,
'resnext56_2x32d_cifar100': resnext56_2x32d_cifar100,
'resnext56_2x32d_svhn': resnext56_2x32d_svhn,
'resnext56_4x16d_cifar10': resnext56_4x16d_cifar10,
'resnext56_4x16d_cifar100': resnext56_4x16d_cifar100,
'resnext56_4x16d_svhn': resnext56_4x16d_svhn,
'resnext56_8x8d_cifar10': resnext56_8x8d_cifar10,
'resnext56_8x8d_cifar100': resnext56_8x8d_cifar100,
'resnext56_8x8d_svhn': resnext56_8x8d_svhn,
'resnext56_16x4d_cifar10': resnext56_16x4d_cifar10,
'resnext56_16x4d_cifar100': resnext56_16x4d_cifar100,
'resnext56_16x4d_svhn': resnext56_16x4d_svhn,
'resnext56_32x2d_cifar10': resnext56_32x2d_cifar10,
'resnext56_32x2d_cifar100': resnext56_32x2d_cifar100,
'resnext56_32x2d_svhn': resnext56_32x2d_svhn,
'resnext56_64x1d_cifar10': resnext56_64x1d_cifar10,
'resnext56_64x1d_cifar100': resnext56_64x1d_cifar100,
'resnext56_64x1d_svhn': resnext56_64x1d_svhn,
'resnext272_1x64d_cifar10': resnext272_1x64d_cifar10,
'resnext272_1x64d_cifar100': resnext272_1x64d_cifar100,
'resnext272_1x64d_svhn': resnext272_1x64d_svhn,
'resnext272_2x32d_cifar10': resnext272_2x32d_cifar10,
'resnext272_2x32d_cifar100': resnext272_2x32d_cifar100,
'resnext272_2x32d_svhn': resnext272_2x32d_svhn,
'seresnet20_cifar10': seresnet20_cifar10,
'seresnet20_cifar100': seresnet20_cifar100,
'seresnet20_svhn': seresnet20_svhn,
'seresnet56_cifar10': seresnet56_cifar10,
'seresnet56_cifar100': seresnet56_cifar100,
'seresnet56_svhn': seresnet56_svhn,
'seresnet110_cifar10': seresnet110_cifar10,
'seresnet110_cifar100': seresnet110_cifar100,
'seresnet110_svhn': seresnet110_svhn,
'seresnet164bn_cifar10': seresnet164bn_cifar10,
'seresnet164bn_cifar100': seresnet164bn_cifar100,
'seresnet164bn_svhn': seresnet164bn_svhn,
'seresnet272bn_cifar10': seresnet272bn_cifar10,
'seresnet272bn_cifar100': seresnet272bn_cifar100,
'seresnet272bn_svhn': seresnet272bn_svhn,
'seresnet542bn_cifar10': seresnet542bn_cifar10,
'seresnet542bn_cifar100': seresnet542bn_cifar100,
'seresnet542bn_svhn': seresnet542bn_svhn,
'seresnet1001_cifar10': seresnet1001_cifar10,
'seresnet1001_cifar100': seresnet1001_cifar100,
'seresnet1001_svhn': seresnet1001_svhn,
'seresnet1202_cifar10': seresnet1202_cifar10,
'seresnet1202_cifar100': seresnet1202_cifar100,
'seresnet1202_svhn': seresnet1202_svhn,
'sepreresnet20_cifar10': sepreresnet20_cifar10,
'sepreresnet20_cifar100': sepreresnet20_cifar100,
'sepreresnet20_svhn': sepreresnet20_svhn,
'sepreresnet56_cifar10': sepreresnet56_cifar10,
'sepreresnet56_cifar100': sepreresnet56_cifar100,
'sepreresnet56_svhn': sepreresnet56_svhn,
'sepreresnet110_cifar10': sepreresnet110_cifar10,
'sepreresnet110_cifar100': sepreresnet110_cifar100,
'sepreresnet110_svhn': sepreresnet110_svhn,
'sepreresnet164bn_cifar10': sepreresnet164bn_cifar10,
'sepreresnet164bn_cifar100': sepreresnet164bn_cifar100,
'sepreresnet164bn_svhn': sepreresnet164bn_svhn,
'sepreresnet272bn_cifar10': sepreresnet272bn_cifar10,
'sepreresnet272bn_cifar100': sepreresnet272bn_cifar100,
'sepreresnet272bn_svhn': sepreresnet272bn_svhn,
'sepreresnet542bn_cifar10': sepreresnet542bn_cifar10,
'sepreresnet542bn_cifar100': sepreresnet542bn_cifar100,
'sepreresnet542bn_svhn': sepreresnet542bn_svhn,
'sepreresnet1001_cifar10': sepreresnet1001_cifar10,
'sepreresnet1001_cifar100': sepreresnet1001_cifar100,
'sepreresnet1001_svhn': sepreresnet1001_svhn,
'sepreresnet1202_cifar10': sepreresnet1202_cifar10,
'sepreresnet1202_cifar100': sepreresnet1202_cifar100,
'sepreresnet1202_svhn': sepreresnet1202_svhn,
'pyramidnet110_a48_cifar10': pyramidnet110_a48_cifar10,
'pyramidnet110_a48_cifar100': pyramidnet110_a48_cifar100,
'pyramidnet110_a48_svhn': pyramidnet110_a48_svhn,
'pyramidnet110_a84_cifar10': pyramidnet110_a84_cifar10,
'pyramidnet110_a84_cifar100': pyramidnet110_a84_cifar100,
'pyramidnet110_a84_svhn': pyramidnet110_a84_svhn,
'pyramidnet110_a270_cifar10': pyramidnet110_a270_cifar10,
'pyramidnet110_a270_cifar100': pyramidnet110_a270_cifar100,
'pyramidnet110_a270_svhn': pyramidnet110_a270_svhn,
'pyramidnet164_a270_bn_cifar10': pyramidnet164_a270_bn_cifar10,
'pyramidnet164_a270_bn_cifar100': pyramidnet164_a270_bn_cifar100,
'pyramidnet164_a270_bn_svhn': pyramidnet164_a270_bn_svhn,
'pyramidnet200_a240_bn_cifar10': pyramidnet200_a240_bn_cifar10,
'pyramidnet200_a240_bn_cifar100': pyramidnet200_a240_bn_cifar100,
'pyramidnet200_a240_bn_svhn': pyramidnet200_a240_bn_svhn,
'pyramidnet236_a220_bn_cifar10': pyramidnet236_a220_bn_cifar10,
'pyramidnet236_a220_bn_cifar100': pyramidnet236_a220_bn_cifar100,
'pyramidnet236_a220_bn_svhn': pyramidnet236_a220_bn_svhn,
'pyramidnet272_a200_bn_cifar10': pyramidnet272_a200_bn_cifar10,
'pyramidnet272_a200_bn_cifar100': pyramidnet272_a200_bn_cifar100,
'pyramidnet272_a200_bn_svhn': pyramidnet272_a200_bn_svhn,
'densenet40_k12_cifar10': densenet40_k12_cifar10,
'densenet40_k12_cifar100': densenet40_k12_cifar100,
'densenet40_k12_svhn': densenet40_k12_svhn,
'densenet40_k12_bc_cifar10': densenet40_k12_bc_cifar10,
'densenet40_k12_bc_cifar100': densenet40_k12_bc_cifar100,
'densenet40_k12_bc_svhn': densenet40_k12_bc_svhn,
'densenet40_k24_bc_cifar10': densenet40_k24_bc_cifar10,
'densenet40_k24_bc_cifar100': densenet40_k24_bc_cifar100,
'densenet40_k24_bc_svhn': densenet40_k24_bc_svhn,
'densenet40_k36_bc_cifar10': densenet40_k36_bc_cifar10,
'densenet40_k36_bc_cifar100': densenet40_k36_bc_cifar100,
'densenet40_k36_bc_svhn': densenet40_k36_bc_svhn,
'densenet100_k12_cifar10': densenet100_k12_cifar10,
'densenet100_k12_cifar100': densenet100_k12_cifar100,
'densenet100_k12_svhn': densenet100_k12_svhn,
'densenet100_k24_cifar10': densenet100_k24_cifar10,
'densenet100_k24_cifar100': densenet100_k24_cifar100,
'densenet100_k24_svhn': densenet100_k24_svhn,
'densenet100_k12_bc_cifar10': densenet100_k12_bc_cifar10,
'densenet100_k12_bc_cifar100': densenet100_k12_bc_cifar100,
'densenet100_k12_bc_svhn': densenet100_k12_bc_svhn,
'densenet190_k40_bc_cifar10': densenet190_k40_bc_cifar10,
'densenet190_k40_bc_cifar100': densenet190_k40_bc_cifar100,
'densenet190_k40_bc_svhn': densenet190_k40_bc_svhn,
'densenet250_k24_bc_cifar10': densenet250_k24_bc_cifar10,
'densenet250_k24_bc_cifar100': densenet250_k24_bc_cifar100,
'densenet250_k24_bc_svhn': densenet250_k24_bc_svhn,
'xdensenet40_2_k24_bc_cifar10': xdensenet40_2_k24_bc_cifar10,
'xdensenet40_2_k24_bc_cifar100': xdensenet40_2_k24_bc_cifar100,
'xdensenet40_2_k24_bc_svhn': xdensenet40_2_k24_bc_svhn,
'xdensenet40_2_k36_bc_cifar10': xdensenet40_2_k36_bc_cifar10,
'xdensenet40_2_k36_bc_cifar100': xdensenet40_2_k36_bc_cifar100,
'xdensenet40_2_k36_bc_svhn': xdensenet40_2_k36_bc_svhn,
'wrn16_10_cifar10': wrn16_10_cifar10,
'wrn16_10_cifar100': wrn16_10_cifar100,
'wrn16_10_svhn': wrn16_10_svhn,
'wrn28_10_cifar10': wrn28_10_cifar10,
'wrn28_10_cifar100': wrn28_10_cifar100,
'wrn28_10_svhn': wrn28_10_svhn,
'wrn40_8_cifar10': wrn40_8_cifar10,
'wrn40_8_cifar100': wrn40_8_cifar100,
'wrn40_8_svhn': wrn40_8_svhn,
'wrn20_10_1bit_cifar10': wrn20_10_1bit_cifar10,
'wrn20_10_1bit_cifar100': wrn20_10_1bit_cifar100,
'wrn20_10_1bit_svhn': wrn20_10_1bit_svhn,
'wrn20_10_32bit_cifar10': wrn20_10_32bit_cifar10,
'wrn20_10_32bit_cifar100': wrn20_10_32bit_cifar100,
'wrn20_10_32bit_svhn': wrn20_10_32bit_svhn,
'ror3_56_cifar10': ror3_56_cifar10,
'ror3_56_cifar100': ror3_56_cifar100,
'ror3_56_svhn': ror3_56_svhn,
'ror3_110_cifar10': ror3_110_cifar10,
'ror3_110_cifar100': ror3_110_cifar100,
'ror3_110_svhn': ror3_110_svhn,
'ror3_164_cifar10': ror3_164_cifar10,
'ror3_164_cifar100': ror3_164_cifar100,
'ror3_164_svhn': ror3_164_svhn,
'rir_cifar10': rir_cifar10,
'rir_cifar100': rir_cifar100,
'rir_svhn': rir_svhn,
'resdropresnet20_cifar10': resdropresnet20_cifar10,
'resdropresnet20_cifar100': resdropresnet20_cifar100,
'resdropresnet20_svhn': resdropresnet20_svhn,
'shakeshakeresnet20_2x16d_cifar10': shakeshakeresnet20_2x16d_cifar10,
'shakeshakeresnet20_2x16d_cifar100': shakeshakeresnet20_2x16d_cifar100,
'shakeshakeresnet20_2x16d_svhn': shakeshakeresnet20_2x16d_svhn,
'shakeshakeresnet26_2x32d_cifar10': shakeshakeresnet26_2x32d_cifar10,
'shakeshakeresnet26_2x32d_cifar100': shakeshakeresnet26_2x32d_cifar100,
'shakeshakeresnet26_2x32d_svhn': shakeshakeresnet26_2x32d_svhn,
'shakedropresnet20_cifar10': shakedropresnet20_cifar10,
'shakedropresnet20_cifar100': shakedropresnet20_cifar100,
'shakedropresnet20_svhn': shakedropresnet20_svhn,
'fractalnet_cifar10': fractalnet_cifar10,
'fractalnet_cifar100': fractalnet_cifar100,
'diaresnet20_cifar10': diaresnet20_cifar10,
'diaresnet20_cifar100': diaresnet20_cifar100,
'diaresnet20_svhn': diaresnet20_svhn,
'diaresnet56_cifar10': diaresnet56_cifar10,
'diaresnet56_cifar100': diaresnet56_cifar100,
'diaresnet56_svhn': diaresnet56_svhn,
'diaresnet110_cifar10': diaresnet110_cifar10,
'diaresnet110_cifar100': diaresnet110_cifar100,
'diaresnet110_svhn': diaresnet110_svhn,
'diaresnet164bn_cifar10': diaresnet164bn_cifar10,
'diaresnet164bn_cifar100': diaresnet164bn_cifar100,
'diaresnet164bn_svhn': diaresnet164bn_svhn,
'diaresnet1001_cifar10': diaresnet1001_cifar10,
'diaresnet1001_cifar100': diaresnet1001_cifar100,
'diaresnet1001_svhn': diaresnet1001_svhn,
'diaresnet1202_cifar10': diaresnet1202_cifar10,
'diaresnet1202_cifar100': diaresnet1202_cifar100,
'diaresnet1202_svhn': diaresnet1202_svhn,
'diapreresnet20_cifar10': diapreresnet20_cifar10,
'diapreresnet20_cifar100': diapreresnet20_cifar100,
'diapreresnet20_svhn': diapreresnet20_svhn,
'diapreresnet56_cifar10': diapreresnet56_cifar10,
'diapreresnet56_cifar100': diapreresnet56_cifar100,
'diapreresnet56_svhn': diapreresnet56_svhn,
'diapreresnet110_cifar10': diapreresnet110_cifar10,
'diapreresnet110_cifar100': diapreresnet110_cifar100,
'diapreresnet110_svhn': diapreresnet110_svhn,
'diapreresnet164bn_cifar10': diapreresnet164bn_cifar10,
'diapreresnet164bn_cifar100': diapreresnet164bn_cifar100,
'diapreresnet164bn_svhn': diapreresnet164bn_svhn,
'diapreresnet1001_cifar10': diapreresnet1001_cifar10,
'diapreresnet1001_cifar100': diapreresnet1001_cifar100,
'diapreresnet1001_svhn': diapreresnet1001_svhn,
'diapreresnet1202_cifar10': diapreresnet1202_cifar10,
'diapreresnet1202_cifar100': diapreresnet1202_cifar100,
'diapreresnet1202_svhn': diapreresnet1202_svhn,
'isqrtcovresnet18': isqrtcovresnet18,
'isqrtcovresnet34': isqrtcovresnet34,
'isqrtcovresnet50': isqrtcovresnet50,
'isqrtcovresnet50b': isqrtcovresnet50b,
'isqrtcovresnet101': isqrtcovresnet101,
'isqrtcovresnet101b': isqrtcovresnet101b,
'resneta10': resneta10,
'resnetabc14b': resnetabc14b,
'resneta18': resneta18,
'resneta50b': resneta50b,
'resneta101b': resneta101b,
'resneta152b': resneta152b,
'resnetd50b': resnetd50b,
'resnetd101b': resnetd101b,
'resnetd152b': resnetd152b,
'fastseresnet101b': fastseresnet101b,
'octresnet10_ad2': octresnet10_ad2,
'octresnet50b_ad2': octresnet50b_ad2,
'octresnet20_ad2_cifar10': octresnet20_ad2_cifar10,
'octresnet20_ad2_cifar100': octresnet20_ad2_cifar100,
'octresnet20_ad2_svhn': octresnet20_ad2_svhn,
'octresnet56_ad2_cifar10': octresnet56_ad2_cifar10,
'octresnet56_ad2_cifar100': octresnet56_ad2_cifar100,
'octresnet56_ad2_svhn': octresnet56_ad2_svhn,
'res2net50_w14_s8': res2net50_w14_s8,
'res2net50_w26_s8': res2net50_w26_s8,
'resnet10_cub': resnet10_cub,
'resnet12_cub': resnet12_cub,
'resnet14_cub': resnet14_cub,
'resnetbc14b_cub': resnetbc14b_cub,
'resnet16_cub': resnet16_cub,
'resnet18_cub': resnet18_cub,
'resnet26_cub': resnet26_cub,
'resnetbc26b_cub': resnetbc26b_cub,
'resnet34_cub': resnet34_cub,
'resnetbc38b_cub': resnetbc38b_cub,
'resnet50_cub': resnet50_cub,
'resnet50b_cub': resnet50b_cub,
'resnet101_cub': resnet101_cub,
'resnet101b_cub': resnet101b_cub,
'resnet152_cub': resnet152_cub,
'resnet152b_cub': resnet152b_cub,
'resnet200_cub': resnet200_cub,
'resnet200b_cub': resnet200b_cub,
'seresnet10_cub': seresnet10_cub,
'seresnet12_cub': seresnet12_cub,
'seresnet14_cub': seresnet14_cub,
'seresnetbc14b_cub': seresnetbc14b_cub,
'seresnet16_cub': seresnet16_cub,
'seresnet18_cub': seresnet18_cub,
'seresnet26_cub': seresnet26_cub,
'seresnetbc26b_cub': seresnetbc26b_cub,
'seresnet34_cub': seresnet34_cub,
'seresnetbc38b_cub': seresnetbc38b_cub,
'seresnet50_cub': seresnet50_cub,
'seresnet50b_cub': seresnet50b_cub,
'seresnet101_cub': seresnet101_cub,
'seresnet101b_cub': seresnet101b_cub,
'seresnet152_cub': seresnet152_cub,
'seresnet152b_cub': seresnet152b_cub,
'seresnet200_cub': seresnet200_cub,
'seresnet200b_cub': seresnet200b_cub,
'mobilenet_w1_cub': mobilenet_w1_cub,
'mobilenet_w3d4_cub': mobilenet_w3d4_cub,
'mobilenet_wd2_cub': mobilenet_wd2_cub,
'mobilenet_wd4_cub': mobilenet_wd4_cub,
'fdmobilenet_w1_cub': fdmobilenet_w1_cub,
'fdmobilenet_w3d4_cub': fdmobilenet_w3d4_cub,
'fdmobilenet_wd2_cub': fdmobilenet_wd2_cub,
'fdmobilenet_wd4_cub': fdmobilenet_wd4_cub,
'proxylessnas_cpu_cub': proxylessnas_cpu_cub,
'proxylessnas_gpu_cub': proxylessnas_gpu_cub,
'proxylessnas_mobile_cub': proxylessnas_mobile_cub,
'proxylessnas_mobile14_cub': proxylessnas_mobile14_cub,
'ntsnet_cub': ntsnet_cub,
'fcn8sd_resnetd50b_voc': fcn8sd_resnetd50b_voc,
'fcn8sd_resnetd101b_voc': fcn8sd_resnetd101b_voc,
'fcn8sd_resnetd50b_coco': fcn8sd_resnetd50b_coco,
'fcn8sd_resnetd101b_coco': fcn8sd_resnetd101b_coco,
'fcn8sd_resnetd50b_ade20k': fcn8sd_resnetd50b_ade20k,
'fcn8sd_resnetd101b_ade20k': fcn8sd_resnetd101b_ade20k,
'fcn8sd_resnetd50b_cityscapes': fcn8sd_resnetd50b_cityscapes,
'fcn8sd_resnetd101b_cityscapes': fcn8sd_resnetd101b_cityscapes,
'pspnet_resnetd50b_voc': pspnet_resnetd50b_voc,
'pspnet_resnetd101b_voc': pspnet_resnetd101b_voc,
'pspnet_resnetd50b_coco': pspnet_resnetd50b_coco,
'pspnet_resnetd101b_coco': pspnet_resnetd101b_coco,
'pspnet_resnetd50b_ade20k': pspnet_resnetd50b_ade20k,
'pspnet_resnetd101b_ade20k': pspnet_resnetd101b_ade20k,
'pspnet_resnetd50b_cityscapes': pspnet_resnetd50b_cityscapes,
'pspnet_resnetd101b_cityscapes': pspnet_resnetd101b_cityscapes,
'deeplabv3_resnetd50b_voc': deeplabv3_resnetd50b_voc,
'deeplabv3_resnetd101b_voc': deeplabv3_resnetd101b_voc,
'deeplabv3_resnetd152b_voc': deeplabv3_resnetd152b_voc,
'deeplabv3_resnetd50b_coco': deeplabv3_resnetd50b_coco,
'deeplabv3_resnetd101b_coco': deeplabv3_resnetd101b_coco,
'deeplabv3_resnetd152b_coco': deeplabv3_resnetd152b_coco,
'deeplabv3_resnetd50b_ade20k': deeplabv3_resnetd50b_ade20k,
'deeplabv3_resnetd101b_ade20k': deeplabv3_resnetd101b_ade20k,
'deeplabv3_resnetd50b_cityscapes': deeplabv3_resnetd50b_cityscapes,
'deeplabv3_resnetd101b_cityscapes': deeplabv3_resnetd101b_cityscapes,
'icnet_resnetd50b_cityscapes': icnet_resnetd50b_cityscapes,
'fastscnn_cityscapes': fastscnn_cityscapes,
'cgnet_cityscapes': cgnet_cityscapes,
'dabnet_cityscapes': dabnet_cityscapes,
'sinet_cityscapes': sinet_cityscapes,
'bisenet_resnet18_celebamaskhq': bisenet_resnet18_celebamaskhq,
'danet_resnetd50b_cityscapes': danet_resnetd50b_cityscapes,
'danet_resnetd101b_cityscapes': danet_resnetd101b_cityscapes,
'fpenet_cityscapes': fpenet_cityscapes,
'lednet_cityscapes': lednet_cityscapes,
'superpointnet': superpointnet,
'alphapose_fastseresnet101b_coco': alphapose_fastseresnet101b_coco,
'simplepose_resnet18_coco': simplepose_resnet18_coco,
'simplepose_resnet50b_coco': simplepose_resnet50b_coco,
'simplepose_resnet101b_coco': simplepose_resnet101b_coco,
'simplepose_resnet152b_coco': simplepose_resnet152b_coco,
'simplepose_resneta50b_coco': simplepose_resneta50b_coco,
'simplepose_resneta101b_coco': simplepose_resneta101b_coco,
'simplepose_resneta152b_coco': simplepose_resneta152b_coco,
'simplepose_mobile_resnet18_coco': simplepose_mobile_resnet18_coco,
'simplepose_mobile_resnet50b_coco': simplepose_mobile_resnet50b_coco,
'simplepose_mobile_mobilenet_w1_coco': simplepose_mobile_mobilenet_w1_coco,
'simplepose_mobile_mobilenetv2b_w1_coco': simplepose_mobile_mobilenetv2b_w1_coco,
'simplepose_mobile_mobilenetv3_small_w1_coco': simplepose_mobile_mobilenetv3_small_w1_coco,
'simplepose_mobile_mobilenetv3_large_w1_coco': simplepose_mobile_mobilenetv3_large_w1_coco,
'lwopenpose2d_mobilenet_cmupan_coco': lwopenpose2d_mobilenet_cmupan_coco,
'lwopenpose3d_mobilenet_cmupan_coco': lwopenpose3d_mobilenet_cmupan_coco,
'ibppose_coco': ibppose_coco,
'centernet_resnet18_voc': centernet_resnet18_voc,
'centernet_resnet18_coco': centernet_resnet18_coco,
'centernet_resnet50b_voc': centernet_resnet50b_voc,
'centernet_resnet50b_coco': centernet_resnet50b_coco,
'centernet_resnet101b_voc': centernet_resnet101b_voc,
'centernet_resnet101b_coco': centernet_resnet101b_coco,
'lffd20x5s320v2_widerface': lffd20x5s320v2_widerface,
'lffd25x8s560v1_widerface': lffd25x8s560v1_widerface,
'visemenet20': visemenet20,
'voca8flame': voca8flame,
'nvpattexp116bazel76': nvpattexp116bazel76,
'jasper5x3': jasper5x3,
'jasper10x4': jasper10x4,
'jasper10x5': jasper10x5,
'jasperdr10x5_en': jasperdr10x5_en,
'jasperdr10x5_en_nr': jasperdr10x5_en_nr,
'quartznet5x5_en_ls': quartznet5x5_en_ls,
'quartznet15x5_en': quartznet15x5_en,
'quartznet15x5_en_nr': quartznet15x5_en_nr,
'quartznet15x5_fr': quartznet15x5_fr,
'quartznet15x5_de': quartznet15x5_de,
'quartznet15x5_it': quartznet15x5_it,
'quartznet15x5_es': quartznet15x5_es,
'quartznet15x5_ca': quartznet15x5_ca,
'quartznet15x5_pl': quartznet15x5_pl,
'quartznet15x5_ru': quartznet15x5_ru,
'quartznet15x5_ru34': quartznet15x5_ru34,
# 'oth_simple_pose_resnet18_v1b': oth_simple_pose_resnet18_v1b,
# 'oth_simple_pose_resnet50_v1b': oth_simple_pose_resnet50_v1b,
# 'oth_simple_pose_resnet101_v1b': oth_simple_pose_resnet101_v1b,
# 'oth_simple_pose_resnet152_v1b': oth_simple_pose_resnet152_v1b,
# 'oth_simple_pose_resnet50_v1d': oth_simple_pose_resnet50_v1d,
# 'oth_simple_pose_resnet101_v1d': oth_simple_pose_resnet101_v1d,
# 'oth_simple_pose_resnet152_v1d': oth_simple_pose_resnet152_v1d,
#
# 'oth_mobile_pose_resnet18_v1b': oth_mobile_pose_resnet18_v1b,
# 'oth_mobile_pose_resnet50_v1b': oth_mobile_pose_resnet50_v1b,
# 'oth_mobile_pose_mobilenet1_0': oth_mobile_pose_mobilenet1_0,
# 'oth_mobile_pose_mobilenetv2_1_0': oth_mobile_pose_mobilenetv2_1_0,
# 'oth_mobile_pose_mobilenetv3_small': oth_mobile_pose_mobilenetv3_small,
# 'oth_mobile_pose_mobilenetv3_large': oth_mobile_pose_mobilenetv3_large,
#
# 'oth_alpha_pose_resnet101_v1b_coco': oth_alpha_pose_resnet101_v1b_coco,
# 'oth_resnet50_v1d': oth_resnet50_v1d,
# 'oth_resnet101_v1d': oth_resnet101_v1d,
# 'oth_resnet152_v1d': oth_resnet152_v1d,
# 'oth_mobilenet_v2_1_0': oth_mobilenet_v2_1_0,
# 'oth_mobilenet_v2_0_75': oth_mobilenet_v2_0_75,
# 'oth_mobilenet_v2_0_5': oth_mobilenet_v2_0_5,
# 'oth_mobilenet_v2_0_25': oth_mobilenet_v2_0_25,
# 'oth_icnet_resnet50_citys': oth_icnet_resnet50_citys,
# 'center_net_resnet18_v1b_voc': center_net_resnet18_v1b_voc,
# 'center_net_resnet18_v1b_coco': center_net_resnet18_v1b_coco,
# 'center_net_resnet50_v1b_voc': center_net_resnet50_v1b_voc,
# 'center_net_resnet50_v1b_coco': center_net_resnet50_v1b_coco,
# 'center_net_resnet101_v1b_voc': center_net_resnet101_v1b_voc,
# 'center_net_resnet101_v1b_coco': center_net_resnet101_v1b_coco,
# 'oth_resnest14': oth_resnest14,
# 'oth_resnest26': oth_resnest26,
# 'oth_resnest50': oth_resnest50,
# 'oth_resnest101': oth_resnest101,
# 'oth_resnest200': oth_resnest200,
# 'oth_resnest269': oth_resnest269,
# 'oth_danet_resnet50_citys': oth_danet_resnet50_citys,
# 'oth_danet_resnet101_citys': oth_danet_resnet101_citys,
'regnetv002': regnetv002,
'regnetv004': regnetv004,
'regnetv006': regnetv006,
'regnetv008': regnetv008,
'regnetv016': regnetv016,
'regnetv032': regnetv032,
'regnetv040': regnetv040,
'regnetv064': regnetv064,
'regnetv080': regnetv080,
'regnetv120': regnetv120,
'regnetv160': regnetv160,
'regnetv320': regnetv320,
}
def get_model(name, **kwargs):
    """
    Get supported model by name.

    Parameters:
    ----------
    name : str
        Name of model (case-insensitive).

    Returns:
    -------
    HybridBlock
        Resulted model.
    """
    key = name.lower()
    # Guard clause: fail fast on unknown names before touching the registry.
    if key not in _models:
        raise ValueError("Unsupported model: {}".format(key))
    return _models[key](**kwargs)
| 47,262 | 35.666408 | 95 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/airnext.py | """
AirNeXt for ImageNet-1K, implemented in Gluon.
Original paper: 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
https://ieeexplore.ieee.org/document/8510896.
"""
__all__ = ['AirNeXt', 'airnext50_32x4d_r2', 'airnext101_32x4d_r2', 'airnext101_32x4d_r16']
import os
import math
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .airnet import AirBlock, AirInitBlock
class AirNeXtBottleneck(HybridBlock):
    """
    AirNeXt bottleneck block for the residual path in an AirNeXt unit
    (a ResNeXt-style grouped bottleneck, optionally re-weighted by an attention (air) branch).
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio: int
        Air compression ratio.
    in_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 ratio,
                 in_size,
                 **kwargs):
        super(AirNeXtBottleneck, self).__init__(**kwargs)
        mid_channels = out_channels // 4
        # Per-group channel count, scaled by the requested bottleneck width
        # relative to the reference width of 64 (ResNeXt convention).
        D = int(math.floor(mid_channels * (bottleneck_width / 64.0)))
        group_width = cardinality * D
        # Attention (air) branch only for stride-1 blocks with mid_channels < 512.
        self.use_air_block = (strides == 1 and mid_channels < 512)
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=group_width,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=group_width,
                out_channels=group_width,
                strides=strides,
                groups=cardinality,
                bn_use_global_stats=bn_use_global_stats)
            # Final projection has no activation; the unit applies ReLU after
            # the residual addition.
            self.conv3 = conv1x1_block(
                in_channels=group_width,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            if self.use_air_block:
                self.air = AirBlock(
                    in_channels=in_channels,
                    out_channels=group_width,
                    groups=(cardinality // ratio),
                    bn_use_global_stats=bn_use_global_stats,
                    ratio=ratio,
                    in_size=in_size)
    def hybrid_forward(self, F, x):
        if self.use_air_block:
            # Attention map is computed from the block input, not from conv1/conv2 output.
            att = self.air(x)
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_air_block:
            # Element-wise re-weighting of the grouped-conv output.
            x = x * att
        x = self.conv3(x)
        return x
class AirNeXtUnit(HybridBlock):
    """
    AirNeXt unit with residual connection.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    ratio: int
        Air compression ratio.
    in_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 cardinality,
                 bottleneck_width,
                 bn_use_global_stats,
                 ratio,
                 in_size,
                 **kwargs):
        super(AirNeXtUnit, self).__init__(**kwargs)
        # The identity branch needs a projection when the shape changes
        # (channel count or spatial stride).
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = AirNeXtBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                cardinality=cardinality,
                bottleneck_width=bottleneck_width,
                bn_use_global_stats=bn_use_global_stats,
                ratio=ratio,
                in_size=in_size)
            if self.resize_identity:
                # 1x1 projection (no activation) to match the body output shape.
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Post-activation residual: ReLU applied after the addition.
        x = x + identity
        x = self.activ(x)
        return x
class AirNeXt(HybridBlock):
    """
    AirNeXt model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant Representations,'
    https://ieeexplore.ieee.org/document/8510896.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    ratio: int
        Air compression ratio.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 cardinality,
                 bottleneck_width,
                 ratio,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(AirNeXt, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(AirInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # Track the feature-map spatial size for the AirBlock upsampling;
            # this bookkeeping assumes AirInitBlock downscales by 4x — TODO confirm.
            in_size = tuple([x // 4 for x in in_size])
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except the first.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(AirNeXtUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            cardinality=cardinality,
                            bottleneck_width=bottleneck_width,
                            bn_use_global_stats=bn_use_global_stats,
                            ratio=ratio,
                            in_size=in_size))
                        in_channels = out_channels
                        in_size = tuple([x // strides for x in in_size])
                self.features.add(stage)
            # Fixed 7x7 average pooling (matches a 224x224 input downscaled 32x).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_airnext(blocks,
                cardinality,
                bottleneck_width,
                base_channels,
                ratio,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create AirNeXt model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    cardinality: int
        Number of groups.
    bottleneck_width: int
        Width of bottleneck block.
    base_channels: int
        Base number of channels.
    ratio: int
        Air compression ratio.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Stage depths for the supported network depths.
    layers_by_blocks = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported AirNeXt with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]
    bottleneck_expansion = 4
    init_block_channels = base_channels
    # Channel count doubles at every stage and carries the bottleneck expansion.
    channels = [[base_channels * (2 ** i) * bottleneck_expansion] * layer_count
                for i, layer_count in enumerate(layers)]
    net = AirNeXt(
        channels=channels,
        init_block_channels=init_block_channels,
        cardinality=cardinality,
        bottleneck_width=bottleneck_width,
        ratio=ratio,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weights_file, ctx=ctx)
    return net
def airnext50_32x4d_r2(**kwargs):
    """
    AirNeXt50-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 50-layer variant: 32 groups of width 4, air compression ratio 2.
    return get_airnext(blocks=50, cardinality=32, bottleneck_width=4, base_channels=64,
                       ratio=2, model_name="airnext50_32x4d_r2", **kwargs)
def airnext101_32x4d_r2(**kwargs):
    """
    AirNeXt101-32x4d (r=2) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 101-layer variant: 32 groups of width 4, air compression ratio 2.
    return get_airnext(blocks=101, cardinality=32, bottleneck_width=4, base_channels=64,
                       ratio=2, model_name="airnext101_32x4d_r2", **kwargs)
def airnext101_32x4d_r16(**kwargs):
    """
    AirNeXt101-32x4d (r=16) model from 'Attention Inspiring Receptive-Fields Network for Learning Invariant
    Representations,' https://ieeexplore.ieee.org/document/8510896.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 101-layer variant: 32 groups of width 4, air compression ratio 16.
    return get_airnext(blocks=101, cardinality=32, bottleneck_width=4, base_channels=64,
                       ratio=16, model_name="airnext101_32x4d_r16", **kwargs)
def _test():
    """Smoke test: build each AirNeXt variant, check its parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Expected trainable-parameter counts per model.
    expected_counts = {
        airnext50_32x4d_r2: 27604296,
        airnext101_32x4d_r2: 54099272,
        airnext101_32x4d_r16: 45456456,
    }
    for model in (airnext50_32x4d_r2, airnext101_32x4d_r2, airnext101_32x4d_r16):
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 13,827 | 31.845606 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/pspnet.py | """
PSPNet for image segmentation, implemented in Gluon.
Original paper: 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
"""
__all__ = ['PSPNet', 'pspnet_resnetd50b_voc', 'pspnet_resnetd101b_voc', 'pspnet_resnetd50b_coco',
'pspnet_resnetd101b_coco', 'pspnet_resnetd50b_ade20k', 'pspnet_resnetd101b_ade20k',
'pspnet_resnetd50b_cityscapes', 'pspnet_resnetd101b_cityscapes', 'PyramidPooling']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent, Identity
from .common import conv1x1, conv1x1_block, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class PSPFinalBlock(HybridBlock):
    """
    PSPNet final (classification head) block: 3x3 conv bottleneck, dropout,
    1x1 per-pixel classifier, then bilinear upsampling to the requested size.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels (segmentation classes).
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(PSPFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            # Light regularization before the per-pixel classifier.
            self.dropout = nn.Dropout(rate=0.1)
            # Plain 1x1 conv (with bias, no BN/activation) producing class logits.
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
    def hybrid_forward(self, F, x, out_size):
        x = self.conv1(x)
        x = self.dropout(x)
        x = self.conv2(x)
        # Upsample logits back to the (height, width) given by out_size.
        x = F.contrib.BilinearResize2D(x, height=out_size[0], width=out_size[1])
        return x
class PyramidPoolingBranch(HybridBlock):
    """
    Pyramid Pooling branch: adaptive average pooling to a fixed grid,
    1x1 conv, then bilinear upsampling back to the feature-map size.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_out_size : int
        Target output size of the image.
    upscale_out_size : tuple of 2 int or None
        Spatial size of output image for the bilinear upsampling operation.
        If None, the size is taken dynamically from the input tensor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_out_size,
                 upscale_out_size,
                 **kwargs):
        super(PyramidPoolingBranch, self).__init__(**kwargs)
        self.pool_out_size = pool_out_size
        self.upscale_out_size = upscale_out_size
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels)
    def hybrid_forward(self, F, x):
        # NOTE(review): when upscale_out_size is None this reads x.shape, which
        # presumably prevents hybridization (symbols have no concrete shape) — confirm.
        in_size = self.upscale_out_size if self.upscale_out_size is not None else x.shape[2:]
        x = F.contrib.AdaptiveAvgPooling2D(x, output_size=self.pool_out_size)
        x = self.conv(x)
        x = F.contrib.BilinearResize2D(x, height=in_size[0], width=in_size[1])
        return x
class PyramidPooling(HybridBlock):
    """
    Pyramid Pooling module: concatenates the input (identity branch) with four
    pooled-and-upsampled branches, doubling the channel count overall.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 **kwargs):
        super(PyramidPooling, self).__init__(**kwargs)
        # Pooling grids of the four pyramid levels.
        pool_out_sizes = [1, 2, 3, 6]
        assert (len(pool_out_sizes) == 4)
        assert (in_channels % 4 == 0)
        # Each branch emits in_channels // 4, so the four branches together
        # contribute in_channels, and the concat output is 2 * in_channels.
        mid_channels = in_channels // 4
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Identity())
            for pool_out_size in pool_out_sizes:
                self.branches.add(PyramidPoolingBranch(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    pool_out_size=pool_out_size,
                    upscale_out_size=upscale_out_size))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class PSPNet(HybridBlock):
    """
    PSPNet model from 'Pyramid Scene Parsing Network,' https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor. Must return a pair (main features, auxiliary features).
    backbone_out_channels : int, default 2048
        Number of output channels form feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 **kwargs):
        super(PSPNet, self).__init__(**kwargs)
        assert (in_channels > 0)
        # Input must be divisible by 8: pooling sizes below assume an 8x-downscaled feature map.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        with self.name_scope():
            self.backbone = backbone
            # Precompute the pooled feature-map size only when the input size is fixed;
            # otherwise it is derived at runtime from the tensor shape.
            pool_out_size = (self.in_size[0] // 8, self.in_size[1] // 8) if fixed_size else None
            self.pool = PyramidPooling(
                in_channels=backbone_out_channels,
                upscale_out_size=pool_out_size)
            # PyramidPooling concatenates identity + branches, doubling channels.
            pool_out_channels = 2 * backbone_out_channels
            self.final_block = PSPFinalBlock(
                in_channels=pool_out_channels,
                out_channels=classes,
                bottleneck_factor=8)
            if self.aux:
                # Auxiliary head operates on the intermediate backbone output.
                aux_out_channels = backbone_out_channels // 2
                self.aux_block = PSPFinalBlock(
                    in_channels=aux_out_channels,
                    out_channels=classes,
                    bottleneck_factor=4)
    def hybrid_forward(self, F, x):
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # Backbone yields (final-stage features, intermediate features for the aux head).
        x, y = self.backbone(x)
        x = self.pool(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_pspnet(backbone,
               classes,
               aux=False,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create PSPNet model with specific parameters.
    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = PSPNet(backbone=backbone, classes=classes, aux=aux, **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weights_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        # ignore_extra tolerates checkpoint parameters absent from this net
        # (e.g. when the auxiliary head is disabled).
        net.load_parameters(filename=weights_file, ctx=ctx, ignore_extra=True)
    return net
def pspnet_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for Pascal VOC from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-50b feature extractor without its final stage.
    base_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_voc",
        **kwargs)
def pspnet_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for Pascal VOC from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-101b feature extractor without its final stage.
    base_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_voc",
        **kwargs)
def pspnet_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for COCO from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-50b feature extractor without its final stage.
    base_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_coco",
        **kwargs)
def pspnet_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for COCO from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-101b feature extractor without its final stage.
    base_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_coco",
        **kwargs)
def pspnet_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for ADE20K from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-50b feature extractor without its final stage.
    base_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_ade20k",
        **kwargs)
def pspnet_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for ADE20K from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-101b feature extractor without its final stage.
    base_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_ade20k",
        **kwargs)
def pspnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-50b for Cityscapes from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-50b feature extractor without its final stage.
    base_net = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd50b_cityscapes",
        **kwargs)
def pspnet_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    PSPNet model on the base of ResNet(D)-101b for Cityscapes from 'Pyramid Scene Parsing Network,'
    https://arxiv.org/abs/1612.01105.
    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # ResNet(D)-101b feature extractor without its final stage.
    base_net = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,))
    backbone = base_net.features[:-1]
    return get_pspnet(
        backbone=backbone,
        classes=classes,
        aux=aux,
        model_name="pspnet_resnetd101b_cityscapes",
        **kwargs)
def _test():
    """Smoke test: build selected PSPNet variants, check parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx
    in_size = (480, 480)
    aux = False
    pretrained = False
    models = [
        # (pspnet_resnetd50b_voc, 21),
        # (pspnet_resnetd101b_voc, 21),
        # (pspnet_resnetd50b_coco, 21),
        # (pspnet_resnetd101b_coco, 21),
        # (pspnet_resnetd50b_ade20k, 150),
        # (pspnet_resnetd101b_ade20k, 150),
        (pspnet_resnetd50b_cityscapes, 19),
        # (pspnet_resnetd101b_cityscapes, 19),
    ]
    # Expected trainable-parameter counts as (with aux head, without aux head).
    param_counts = {
        pspnet_resnetd50b_voc: (49081578, 46716373),
        pspnet_resnetd101b_voc: (68073706, 65708501),
        pspnet_resnetd50b_coco: (49081578, 46716373),
        pspnet_resnetd101b_coco: (68073706, 65708501),
        pspnet_resnetd50b_ade20k: (49180908, 46782550),
        pspnet_resnetd101b_ade20k: (68173036, 65774678),
        pspnet_resnetd50b_cityscapes: (49080038, 46715347),
        pspnet_resnetd101b_cityscapes: (68072166, 65707475),
    }
    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count only differentiable parameters with a known shape.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        expected = param_counts[model][0] if aux else param_counts[model][1]
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
# Run the smoke tests when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 19,131 | 37.035785 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/dla.py | """
DLA for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.
"""
__all__ = ['DLA', 'dla34', 'dla46c', 'dla46xc', 'dla60', 'dla60x', 'dla60xc', 'dla102', 'dla102x', 'dla102x2', 'dla169']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, conv7x7_block
from .resnet import ResBlock, ResBottleneck
from .resnext import ResNeXtBottleneck
class DLABottleneck(ResBottleneck):
    """
    Bottleneck block used on the residual path of a DLA residual unit.
    It is a plain ResNet bottleneck whose default bottleneck factor is 2.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck_factor : int, default 2
        Bottleneck factor.
    """
    def __init__(self, in_channels, out_channels, strides, bn_use_global_stats,
                 bottleneck_factor=2, **kwargs):
        super(DLABottleneck, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            bn_use_global_stats=bn_use_global_stats,
            bottleneck_factor=bottleneck_factor,
            **kwargs)
class DLABottleneckX(ResNeXtBottleneck):
    """
    ResNeXt-style grouped bottleneck for the residual path of a DLA residual
    unit (cardinality 32, bottleneck width 8 by default).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    cardinality: int, default 32
        Number of groups.
    bottleneck_width: int, default 8
        Width of bottleneck block.
    """
    def __init__(self, in_channels, out_channels, strides, bn_use_global_stats,
                 cardinality=32, bottleneck_width=8, **kwargs):
        super(DLABottleneckX, self).__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            strides=strides,
            bn_use_global_stats=bn_use_global_stats,
            cardinality=cardinality,
            bottleneck_width=bottleneck_width,
            **kwargs)
class DLAResBlock(HybridBlock):
    """
    DLA residual block with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    body_class : nn.Module, default ResBlock
        Residual block body class.
    return_down : bool, default False
        Whether return downsample result.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 body_class=ResBlock,
                 return_down=False,
                 **kwargs):
        super(DLAResBlock, self).__init__(**kwargs)
        self.return_down = return_down
        # Pool spatially when stride > 1; project the identity with a 1x1
        # convolution when the channel count changes.
        self.downsample = (strides > 1)
        self.project = (in_channels != out_channels)
        with self.name_scope():
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            self.activ = nn.Activation("relu")
            if self.downsample:
                self.downsample_pool = nn.MaxPool2D(
                    pool_size=strides,
                    strides=strides)
            if self.project:
                self.project_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)

    def hybrid_forward(self, F, x):
        # `down` is the (possibly pooled) input; the identity branch is `down`
        # after the optional channel projection.  Both are always valid
        # tensors, so the former `if identity is None` fallback was
        # unreachable dead code and has been removed.
        down = self.downsample_pool(x) if self.downsample else x
        identity = self.project_conv(down) if self.project else down
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        if self.return_down:
            return x, down
        else:
            return x
class DLARoot(HybridBlock):
    """
    DLA root block: fuses the concatenated branch outputs of a tree with a
    1x1 convolution, optionally adding the last branch back as a residual.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    residual : bool
        Whether use residual connection.
    """
    def __init__(self, in_channels, out_channels, bn_use_global_stats, residual, **kwargs):
        super(DLARoot, self).__init__(**kwargs)
        self.residual = residual
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x2, x1, extra):
        # x2 is the output of the last branch; keep it for the residual add.
        out = self.conv(F.concat(x2, x1, *extra, dim=1))
        if self.residual:
            out = out + x2
        return self.activ(out)
class DLATree(HybridBlock):
    """
    DLA tree unit. It's like iterative stage.  A tree of depth ``levels``
    recursively builds two sub-trees (two residual blocks at the deepest
    level) and fuses their outputs through a `DLARoot` block.

    Parameters:
    ----------
    levels : int
        Number of levels in the stage.
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    res_body_class : nn.Module
        Residual block body class.
    strides : int or tuple/list of 2 int
        Strides of the convolution in a residual block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    root_residual : bool
        Whether use residual connection in the root.
    root_dim : int, default 0
        Number of input channels in the root block (0 means ``2 * out_channels``).
    first_tree : bool, default False
        Is this tree stage the first stage in the net.
    input_level : bool, default True
        Is this tree unit the first unit in the stage.
    return_down : bool, default False
        Whether return downsample result.
    """
    def __init__(self,
                 levels,
                 in_channels,
                 out_channels,
                 res_body_class,
                 strides,
                 bn_use_global_stats,
                 root_residual,
                 root_dim=0,
                 first_tree=False,
                 input_level=True,
                 return_down=False,
                 **kwargs):
        super(DLATree, self).__init__(**kwargs)
        self.return_down = return_down
        # Top-level trees of every stage except the first feed their
        # downsampled input into the root as an extra aggregation branch.
        self.add_down = (input_level and not first_tree)
        self.root_level = (levels == 1)
        if root_dim == 0:
            root_dim = 2 * out_channels
        if self.add_down:
            root_dim += in_channels
        with self.name_scope():
            if self.root_level:
                # Deepest level: two chained residual blocks; the first one
                # also returns its downsampled input.
                self.tree1 = DLAResBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    body_class=res_body_class,
                    return_down=True)
                self.tree2 = DLAResBlock(
                    in_channels=out_channels,
                    out_channels=out_channels,
                    strides=1,
                    bn_use_global_stats=bn_use_global_stats,
                    body_class=res_body_class,
                    return_down=False)
            else:
                # Recursive case: two sub-trees of depth (levels - 1); only
                # the second one owns the root that fuses the accumulated
                # branches, hence the enlarged root_dim.
                self.tree1 = DLATree(
                    levels=levels - 1,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    res_body_class=res_body_class,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    root_residual=root_residual,
                    root_dim=0,
                    input_level=False,
                    return_down=True)
                self.tree2 = DLATree(
                    levels=levels - 1,
                    in_channels=out_channels,
                    out_channels=out_channels,
                    res_body_class=res_body_class,
                    strides=1,
                    bn_use_global_stats=bn_use_global_stats,
                    root_residual=root_residual,
                    root_dim=root_dim + out_channels,
                    input_level=False,
                    return_down=False)
            if self.root_level:
                self.root = DLARoot(
                    in_channels=root_dim,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats,
                    residual=root_residual)

    def hybrid_forward(self, F, x, extra=None):
        # `extra` accumulates branch outputs from enclosing trees; a fresh
        # list is created per call, so no state leaks between forward passes.
        extra = [] if extra is None else extra
        x1, down = self.tree1(x)
        if self.add_down:
            extra.append(down)
        if self.root_level:
            x2 = self.tree2(x1)
            x = self.root(x2, x1, extra)
        else:
            extra.append(x1)
            x = self.tree2(x1, extra)
        if self.return_down:
            return x, down
        else:
            return x
class DLAInitBlock(HybridBlock):
    """
    DLA specific initial block: a 7x7 convolution block followed by two 3x3
    convolution blocks, the last of which uses stride 2.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self, in_channels, out_channels, bn_use_global_stats=False, **kwargs):
        super(DLAInitBlock, self).__init__(**kwargs)
        # The two leading convolutions work at half the output width.
        mid_channels = out_channels // 2
        with self.name_scope():
            self.conv1 = conv7x7_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Apply the three convolution blocks in order.
        for conv in (self.conv1, self.conv2, self.conv3):
            x = conv(x)
        return x
class DLA(HybridBlock):
    """
    DLA model from 'Deep Layer Aggregation,' https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    levels : list of int
        Number of levels in each stage.
    channels : list of int
        Number of output channels for each stage.
    init_block_channels : int
        Number of output channels for the initial unit.
    res_body_class : nn.Module
        Residual block body class.
    residual_root : bool
        Whether use residual connection in the root blocks.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 levels,
                 channels,
                 init_block_channels,
                 res_body_class,
                 residual_root,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(DLA, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(DLAInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # Each (levels_i, out_channels) pair defines one stride-2 tree stage.
            for i, (levels_i, out_channels) in enumerate(zip(levels, channels)):
                self.features.add(DLATree(
                    levels=levels_i,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    res_body_class=res_body_class,
                    strides=2,
                    bn_use_global_stats=bn_use_global_stats,
                    root_residual=residual_root,
                    first_tree=(i == 0)))
                in_channels = out_channels
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            # Classifier: 1x1 convolution over the pooled map, then flatten.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True))
            self.output.add(nn.Flatten())

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_dla(levels,
            channels,
            res_body_class,
            residual_root=False,
            model_name=None,
            pretrained=False,
            ctx=cpu(),
            root=os.path.join("~", ".mxnet", "models"),
            **kwargs):
    """
    Create DLA model with specific parameters.

    Parameters:
    ----------
    levels : list of int
        Number of levels in each stage.
    channels : list of int
        Number of output channels for each stage.
    res_body_class : nn.Module
        Residual block body class.
    residual_root : bool, default False
        Whether use residual connection in the root blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    DLA
        The constructed (and optionally pretrained) network.
    """
    init_block_channels = 32
    net = DLA(
        levels=levels,
        channels=channels,
        init_block_channels=init_block_channels,
        res_body_class=res_body_class,
        residual_root=residual_root,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def dla34(**kwargs):
    """
    Build the DLA-34 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 128, 256, 512],
        res_body_class=ResBlock,
        model_name="dla34",
        **kwargs)
def dla46c(**kwargs):
    """
    Build the compact DLA-46-C model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneck,
        model_name="dla46c",
        **kwargs)
def dla46xc(**kwargs):
    """
    Build the compact DLA-X-46-C model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 2, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneckX,
        model_name="dla46xc",
        **kwargs)
def dla60(**kwargs):
    """
    Build the DLA-60 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        model_name="dla60",
        **kwargs)
def dla60x(**kwargs):
    """
    Build the DLA-X-60 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        model_name="dla60x",
        **kwargs)
def dla60xc(**kwargs):
    """
    Build the compact DLA-X-60-C model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 2, 3, 1],
        channels=[64, 64, 128, 256],
        res_body_class=DLABottleneckX,
        model_name="dla60xc",
        **kwargs)
def dla102(**kwargs):
    """
    Build the DLA-102 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        residual_root=True,
        model_name="dla102",
        **kwargs)
def dla102x(**kwargs):
    """
    Build the DLA-X-102 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX,
        residual_root=True,
        model_name="dla102x",
        **kwargs)
def dla102x2(**kwargs):
    """
    Build the DLA-X2-102 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    class DLABottleneckX64(DLABottleneckX):
        # DLA-X bottleneck with the group count raised to 64.
        def __init__(self, in_channels, out_channels, strides, bn_use_global_stats):
            super(DLABottleneckX64, self).__init__(in_channels, out_channels, strides, bn_use_global_stats,
                                                   cardinality=64)

    return get_dla(
        levels=[1, 3, 4, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneckX64,
        residual_root=True,
        model_name="dla102x2",
        **kwargs)
def dla169(**kwargs):
    """
    Build the DLA-169 model from 'Deep Layer Aggregation,'
    https://arxiv.org/abs/1707.06484.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_dla(
        levels=[2, 3, 5, 1],
        channels=[128, 256, 512, 1024],
        res_body_class=DLABottleneck,
        residual_root=True,
        model_name="dla169",
        **kwargs)
def _test():
    """Smoke test: check parameter counts and output shapes of all DLA nets."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per model constructor.
    expected_counts = {
        dla34: 15742104,
        dla46c: 1301400,
        dla46xc: 1068440,
        dla60: 22036632,
        dla60x: 17352344,
        dla60xc: 1319832,
        dla102: 33268888,
        dla102x: 26309272,
        dla102x2: 41282200,
        dla169: 53389720,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 23,814 | 32.401122 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/proxylessnas.py | """
ProxylessNAS for ImageNet-1K, implemented in Gluon.
Original paper: 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
https://arxiv.org/abs/1812.00332.
"""
__all__ = ['ProxylessNAS', 'proxylessnas_cpu', 'proxylessnas_gpu', 'proxylessnas_mobile', 'proxylessnas_mobile14',
'ProxylessUnit', 'get_proxylessnas']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import ConvBlock, conv1x1_block, conv3x3_block
class ProxylessBlock(HybridBlock):
    """
    ProxylessNAS block for the residual path of a ProxylessNAS unit: an
    optional 1x1 expansion convolution, a depthwise convolution, and a 1x1
    projection without activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size.
    strides : int
        Strides of the convolution.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    expansion : int
        Expansion ratio.
    """
    def __init__(self, in_channels, out_channels, kernel_size, strides,
                 bn_epsilon, bn_use_global_stats, expansion, **kwargs):
        super(ProxylessBlock, self).__init__(**kwargs)
        # Skip the expansion convolution when the expansion ratio is 1.
        self.use_bc = (expansion > 1)
        mid_channels = in_channels * expansion
        with self.name_scope():
            if self.use_bc:
                self.bc_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    activation="relu6")
            # Depthwise convolution: one group per channel.
            self.dw_conv = ConvBlock(
                in_channels=mid_channels,
                out_channels=mid_channels,
                kernel_size=kernel_size,
                strides=strides,
                padding=(kernel_size - 1) // 2,
                groups=mid_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation="relu6")
            # Linear pointwise projection (no activation).
            self.pw_conv = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)

    def hybrid_forward(self, F, x):
        out = self.bc_conv(x) if self.use_bc else x
        out = self.dw_conv(out)
        return self.pw_conv(out)
class ProxylessUnit(HybridBlock):
    """
    ProxylessNAS unit: an optional residual body plus an optional identity
    shortcut.  At least one of the two branches must be enabled.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    kernel_size : int
        Convolution window size for body block.
    strides : int
        Strides of the convolution.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    expansion : int
        Expansion ratio for body block.
    residual : bool
        Whether to use residual branch.
    shortcut : bool
        Whether to use identity branch.
    """
    def __init__(self, in_channels, out_channels, kernel_size, strides, bn_epsilon,
                 bn_use_global_stats, expansion, residual, shortcut, **kwargs):
        super(ProxylessUnit, self).__init__(**kwargs)
        assert (residual or shortcut)
        self.residual = residual
        self.shortcut = shortcut
        with self.name_scope():
            if self.residual:
                self.body = ProxylessBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats,
                    expansion=expansion)

    def hybrid_forward(self, F, x):
        if not self.residual:
            # Identity only: the unit degenerates to a skip connection.
            return x
        if not self.shortcut:
            # Body only: no skip branch.
            return self.body(x)
        # Both branches: standard residual addition.
        return x + self.body(x)
class ProxylessNAS(HybridBlock):
    """
    ProxylessNAS model from 'ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    residuals : list of list of int
        Whether to use residual branch in units.
    shortcuts : list of list of int
        Whether to use identity branch in units.
    kernel_sizes : list of list of int
        Convolution window size for each units.
    expansions : list of list of int
        Expansion ratio for each units.
    bn_epsilon : float, default 1e-3
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 residuals,
                 shortcuts,
                 kernel_sizes,
                 expansions,
                 bn_epsilon=1e-3,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ProxylessNAS, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: stride-2 3x3 convolution.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation="relu6"))
            in_channels = init_block_channels
            # Build each stage from its per-unit configuration lists.
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                residuals_per_stage = residuals[i]
                shortcuts_per_stage = shortcuts[i]
                kernel_sizes_per_stage = kernel_sizes[i]
                expansions_per_stage = expansions[i]
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        residual = (residuals_per_stage[j] == 1)
                        shortcut = (shortcuts_per_stage[j] == 1)
                        kernel_size = kernel_sizes_per_stage[j]
                        expansion = expansions_per_stage[j]
                        # First unit of every stage after the first downsamples.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ProxylessUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            kernel_size=kernel_size,
                            strides=strides,
                            bn_epsilon=bn_epsilon,
                            bn_use_global_stats=bn_use_global_stats,
                            expansion=expansion,
                            residual=residual,
                            shortcut=shortcut))
                        in_channels = out_channels
                self.features.add(stage)
            # Head: 1x1 expansion convolution, then 7x7 average pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                activation="relu6"))
            in_channels = final_block_channels
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            # Classifier: flatten followed by a fully-connected layer.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_proxylessnas(version,
                     model_name=None,
                     pretrained=False,
                     ctx=cpu(),
                     root=os.path.join("~", ".mxnet", "models"),
                     **kwargs):
    """
    Create ProxylessNAS model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of ProxylessNAS ('cpu', 'gpu', 'mobile' or 'mobile14').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    ProxylessNAS
        The constructed (and optionally pretrained) network.
    """
    # Per-version architecture tables found by the ProxylessNAS search;
    # each inner list configures one stage, one entry per unit.
    # `residuals` marks units with a convolutional body (1) vs identity-only (0).
    if version == "cpu":
        residuals = [[1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 0, 0, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [32, 32, 32, 32], [48, 48, 48, 48], [88, 88, 88, 88, 104, 104, 104, 104],
                    [216, 216, 216, 216, 360]]
        kernel_sizes = [[3], [3, 3, 3, 3], [3, 3, 3, 5], [3, 3, 3, 3, 5, 3, 3, 3], [5, 5, 5, 3, 5]]
        expansions = [[1], [6, 3, 3, 3], [6, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 3, 3, 3, 6]]
        init_block_channels = 40
        final_block_channels = 1432
    elif version == "gpu":
        residuals = [[1], [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 0, 1, 1, 0, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [32, 32, 32, 32], [56, 56, 56, 56], [112, 112, 112, 112, 128, 128, 128, 128],
                    [256, 256, 256, 256, 432]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 3, 3], [7, 5, 5, 5, 5, 3, 3, 5], [7, 7, 7, 5, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 6, 6, 6]]
        init_block_channels = 40
        final_block_channels = 1728
    elif version == "mobile":
        residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[16], [32, 32, 32, 32], [40, 40, 40, 40], [80, 80, 80, 80, 96, 96, 96, 96],
                    [192, 192, 192, 192, 320]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
        init_block_channels = 32
        final_block_channels = 1280
    elif version == "mobile14":
        residuals = [[1], [1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1]]
        channels = [[24], [40, 40, 40, 40], [56, 56, 56, 56], [112, 112, 112, 112, 136, 136, 136, 136],
                    [256, 256, 256, 256, 448]]
        kernel_sizes = [[3], [5, 3, 3, 3], [7, 3, 5, 5], [7, 5, 5, 5, 5, 5, 5, 5], [7, 7, 7, 7, 7]]
        expansions = [[1], [3, 3, 3, 3], [3, 3, 3, 3], [6, 3, 3, 3, 6, 3, 3, 3], [6, 6, 3, 3, 6]]
        init_block_channels = 48
        final_block_channels = 1792
    else:
        raise ValueError("Unsupported ProxylessNAS version: {}".format(version))
    # Identity-branch layout is shared by all versions.
    shortcuts = [[0], [0, 1, 1, 1], [0, 1, 1, 1], [0, 1, 1, 1, 0, 1, 1, 1], [0, 1, 1, 1, 0]]
    net = ProxylessNAS(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        residuals=residuals,
        shortcuts=shortcuts,
        kernel_sizes=kernel_sizes,
        expansions=expansions,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def proxylessnas_cpu(**kwargs):
    """
    Build the CPU-optimized ProxylessNAS model from 'ProxylessNAS: Direct Neural
    Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="cpu",
        model_name="proxylessnas_cpu",
        **kwargs)
def proxylessnas_gpu(**kwargs):
    """
    Build the GPU-optimized ProxylessNAS model from 'ProxylessNAS: Direct Neural
    Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="gpu",
        model_name="proxylessnas_gpu",
        **kwargs)
def proxylessnas_mobile(**kwargs):
    """
    Build the mobile-optimized ProxylessNAS model from 'ProxylessNAS: Direct Neural
    Architecture Search on Target Task and Hardware,' https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="mobile",
        model_name="proxylessnas_mobile",
        **kwargs)
def proxylessnas_mobile14(**kwargs):
    """
    Build the widened mobile (Mobile-14) ProxylessNAS model from 'ProxylessNAS:
    Direct Neural Architecture Search on Target Task and Hardware,'
    https://arxiv.org/abs/1812.00332.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_proxylessnas(
        version="mobile14",
        model_name="proxylessnas_mobile14",
        **kwargs)
def _test():
    """Smoke test: check parameter counts and output shapes of all variants."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per model constructor.
    expected_counts = {
        proxylessnas_cpu: 4361648,
        proxylessnas_gpu: 7119848,
        proxylessnas_mobile: 4080512,
        proxylessnas_mobile14: 6857568,
    }

    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)

        x = mx.nd.zeros((14, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (14, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 16,517 | 35.788419 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/isqrtcovresnet.py | """
iSQRT-COV-ResNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix Square Root
Normalization,' https://arxiv.org/abs/1712.01034.
"""
__all__ = ['iSQRTCOVResNet', 'isqrtcovresnet18', 'isqrtcovresnet34', 'isqrtcovresnet50', 'isqrtcovresnet50b',
'isqrtcovresnet101', 'isqrtcovresnet101b']
import os
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block
from .resnet import ResUnit, ResInitBlock
class CovPool(mx.autograd.Function):
    """
    Covariance pooling function.

    Treats the (batch, channels, height, width) input as ``n = height * width``
    channel observations and returns the (batch, channels, channels) covariance
    ``sigma = Xn * I_bar * Xn^T``, where ``I_bar = I/n - (1/n^2) * ones``.
    """
    def forward(self, x):
        batch, channels, height, width = x.shape
        n = height * width
        xn = x.reshape(batch, channels, n)
        # I_bar combines the 1/n scaling with mean subtraction (centering).
        identity_bar = ((1.0 / n) * mx.nd.eye(n, ctx=xn.context, dtype=xn.dtype)).expand_dims(axis=0).repeat(
            repeats=batch, axis=0)
        ones_bar = mx.nd.full(shape=(batch, n, n), val=(-1.0 / n / n), ctx=xn.context, dtype=xn.dtype)
        i_bar = identity_bar + ones_bar
        sigma = mx.nd.batch_dot(mx.nd.batch_dot(xn, i_bar), xn.transpose(axes=(0, 2, 1)))
        # x and i_bar are needed again to form the gradient in backward().
        self.save_for_backward(x, i_bar)
        return sigma

    def backward(self, grad_sigma):
        x, i_bar = self.saved_tensors
        batch, channels, height, width = x.shape
        n = height * width
        xn = x.reshape(batch, channels, n)
        # Symmetrize the incoming gradient, then right-multiply by Xn * I_bar;
        # I_bar is symmetric, so no extra transpose is required.
        grad_x = grad_sigma + grad_sigma.transpose(axes=(0, 2, 1))
        grad_x = mx.nd.batch_dot(mx.nd.batch_dot(grad_x, xn), i_bar)
        grad_x = grad_x.reshape(batch, channels, height, width)
        return grad_x
class NewtonSchulzSqrt(mx.autograd.Function):
    """
    Newton-Schulz iterative matrix square root function.

    Parameters:
    ----------
    n : int
        Number of iterations (n > 1).
    """
    def __init__(self, n):
        super(NewtonSchulzSqrt, self).__init__()
        assert (n > 1)
        self.n = n
    def forward(self, x):
        """
        Approximate the matrix square root of each (m x m) matrix in the batch.

        The input is trace-normalized (A = X / tr(X)), then the coupled
        Newton-Schulz iteration
            B_k = 0.5 * (3I - Z_k Y_k),  Y_{k+1} = Y_k B_k,  Z_{k+1} = B_k Z_k
        (with Y_0 = A, Z_0 = I) is unrolled for n - 1 steps, and the result is
        rescaled by sqrt(tr(X)).

        Parameters:
        ----------
        x : NDArray
            Batch of square matrices, shape (batch, m, m).

        Returns:
        -------
        NDArray
            Approximate matrix square roots, shape (batch, m, m).
        """
        n = self.n
        batch, cols, rows = x.shape
        assert (cols == rows)
        m = cols
        identity = mx.nd.eye(m, ctx=x.context, dtype=x.dtype).expand_dims(axis=0).repeat(repeats=batch, axis=0)
        # Per-sample trace, kept with singleton dims so it broadcasts.
        x_trace = (x * identity).sum(axis=(1, 2), keepdims=True)
        a = x / x_trace
        i3 = 3.0 * identity
        # Histories of the Y and Z iterates, saved for backward().
        yi = mx.nd.zeros(shape=(batch, n - 1, m, m), ctx=x.context, dtype=x.dtype)
        zi = mx.nd.zeros(shape=(batch, n - 1, m, m), ctx=x.context, dtype=x.dtype)
        b2 = 0.5 * (i3 - a)
        yi[:, 0, :, :] = mx.nd.batch_dot(a, b2)
        zi[:, 0, :, :] = b2
        for i in range(1, n - 1):
            b2 = 0.5 * (i3 - mx.nd.batch_dot(zi[:, i - 1, :, :], yi[:, i - 1, :, :]))
            yi[:, i, :, :] = mx.nd.batch_dot(yi[:, i - 1, :, :], b2)
            zi[:, i, :, :] = mx.nd.batch_dot(b2, zi[:, i - 1, :, :])
        # Final step produces Y only (Z is not needed past this point).
        b2 = 0.5 * (i3 - mx.nd.batch_dot(zi[:, n - 2, :, :], yi[:, n - 2, :, :]))
        yn = mx.nd.batch_dot(yi[:, n - 2, :, :], b2)
        x_trace_sqrt = x_trace.sqrt()
        # Undo the trace normalization: sqrt(X) ~ sqrt(tr(X)) * sqrt(A).
        c = yn * x_trace_sqrt
        self.save_for_backward(x, x_trace, a, yi, zi, yn, x_trace_sqrt)
        return c
    def backward(self, grad_c):
        """
        Backpropagate through the Newton-Schulz iteration.

        Unrolls the iteration in reverse using the saved Y/Z histories, then adds
        the diagonal contribution coming from the trace normalization/rescaling.

        Parameters:
        ----------
        grad_c : NDArray
            Gradient w.r.t. the output, shape (batch, m, m).

        Returns:
        -------
        NDArray
            Gradient w.r.t. the input, shape (batch, m, m).
        """
        x, x_trace, a, yi, zi, yn, x_trace_sqrt = self.saved_tensors
        n = self.n
        batch, m, _ = x.shape
        identity0 = mx.nd.eye(m, ctx=x.context, dtype=x.dtype)
        identity = identity0.expand_dims(axis=0).repeat(repeats=batch, axis=0)
        i3 = 3.0 * identity
        # Gradient w.r.t. yn: chain through c = yn * sqrt(tr(X)).
        grad_yn = grad_c * x_trace_sqrt
        b = i3 - mx.nd.batch_dot(yi[:, n - 2, :, :], zi[:, n - 2, :, :])
        grad_yi = 0.5 * (mx.nd.batch_dot(grad_yn, b) - mx.nd.batch_dot(mx.nd.batch_dot(
            zi[:, n - 2, :, :], yi[:, n - 2, :, :]), grad_yn))
        grad_zi = -0.5 * mx.nd.batch_dot(mx.nd.batch_dot(yi[:, n - 2, :, :], grad_yn), yi[:, n - 2, :, :])
        # Walk the iteration backwards, propagating gradients to earlier Y_i / Z_i.
        for i in range(n - 3, -1, -1):
            b = i3 - mx.nd.batch_dot(yi[:, i, :, :], zi[:, i, :, :])
            ziyi = mx.nd.batch_dot(zi[:, i, :, :], yi[:, i, :, :])
            grad_yi_m1 = 0.5 * (mx.nd.batch_dot(grad_yi, b) - mx.nd.batch_dot(mx.nd.batch_dot(
                zi[:, i, :, :], grad_zi), zi[:, i, :, :]) - mx.nd.batch_dot(ziyi, grad_yi))
            grad_zi_m1 = 0.5 * (mx.nd.batch_dot(b, grad_zi) - mx.nd.batch_dot(mx.nd.batch_dot(
                yi[:, i, :, :], grad_yi), yi[:, i, :, :]) - mx.nd.batch_dot(grad_zi, ziyi))
            grad_yi = grad_yi_m1
            grad_zi = grad_zi_m1
        # Gradient w.r.t. A = X / tr(X) at the start of the iteration.
        grad_a = 0.5 * (mx.nd.batch_dot(grad_yi, i3 - a) - grad_zi - mx.nd.batch_dot(a, grad_yi))
        x_trace_sqr = x_trace * x_trace
        grad_atx_trace = (mx.nd.batch_dot(grad_a.transpose(axes=(0, 2, 1)), x) * identity).sum(
            axis=(1, 2), keepdims=True)
        grad_cty_trace = (mx.nd.batch_dot(grad_c.transpose(axes=(0, 2, 1)), yn) * identity).sum(
            axis=(1, 2), keepdims=True)
        # Diagonal correction from d(tr(X))/dX of the pre-/post-scaling factors.
        grad_x_extra = (0.5 * grad_cty_trace / x_trace_sqrt - grad_atx_trace / x_trace_sqr).tile(
            reps=(1, m, m)) * identity
        grad_x = grad_a / x_trace + grad_x_extra
        return grad_x
class Triuvec(mx.autograd.Function):
    """
    Extract upper triangular part of matrix into vector form.
    """
    def forward(self, x):
        """
        Gather the upper-triangular entries (including the diagonal) of each matrix.

        Parameters:
        ----------
        x : NDArray
            Batch of square matrices, shape (batch, n, n).

        Returns:
        -------
        NDArray
            Vectorized upper triangles, shape (batch, n * (n + 1) // 2).
        """
        batch, cols, rows = x.shape
        assert (cols == rows)
        n = cols
        import numpy as np
        # Flat (row-major) indices of the upper-triangular positions.
        # NOTE(review): rebuilt (and numpy re-imported) on every call; could be
        # cached per n for speed.
        triuvec_inds = np.triu(np.ones(n)).reshape(-1).nonzero()[0]
        x_vec = x.reshape(batch, -1)
        y = x_vec[:, triuvec_inds]
        self.save_for_backward(x, triuvec_inds)
        return y
    def backward(self, grad_y):
        """
        Scatter the incoming gradient back into the upper-triangular positions;
        all other entries receive zero gradient.

        Parameters:
        ----------
        grad_y : NDArray
            Gradient w.r.t. the output, shape (batch, n * (n + 1) // 2).

        Returns:
        -------
        NDArray
            Gradient w.r.t. the input, shape (batch, n, n).
        """
        x, triuvec_inds = self.saved_tensors
        batch, n, _ = x.shape
        grad_x = mx.nd.zeros_like(x).reshape(batch, -1)
        grad_x[:, triuvec_inds] = grad_y
        grad_x = grad_x.reshape(batch, n, n)
        return grad_x
class iSQRTCOVPool(HybridBlock):
    """
    iSQRT-COV pooling layer: covariance pooling, followed by an iterative
    Newton-Schulz matrix square root, followed by upper-triangle vectorization.

    Parameters:
    ----------
    num_iter : int, default 5
        Number of iterations (num_iter > 1).
    """
    def __init__(self,
                 num_iter=5,
                 **kwargs):
        super(iSQRTCOVPool, self).__init__(**kwargs)
        with self.name_scope():
            self.cov_pool = CovPool()
            self.sqrt = NewtonSchulzSqrt(num_iter)
            self.triuvec = Triuvec()

    def hybrid_forward(self, F, x):
        # Covariance -> matrix square root -> flattened upper triangle.
        return self.triuvec(self.sqrt(self.cov_pool(x)))
class iSQRTCOVResNet(HybridBlock):
    """
    iSQRT-COV-ResNet model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
    Square Root Normalization,' https://arxiv.org/abs/1712.01034.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    conv1_stride : bool
        Whether to use stride in the first or the second convolution layer in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 bottleneck,
                 conv1_stride,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000):
        super(iSQRTCOVResNet, self).__init__()
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ResInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample only at the start of intermediate stages;
                        # unlike plain ResNet, the first AND last stages keep
                        # spatial resolution (the last stage feeds cov pooling).
                        strides = 2 if (j == 0) and (i not in [0, len(channels) - 1]) else 1
                        stage.add(ResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=conv1_stride))
                        in_channels = out_channels
                self.features.add(stage)
            # 1x1 conv reduces channel count before the (quadratic-size) cov pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = final_block_channels
            self.features.add(iSQRTCOVPool())
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            # iSQRT-COV pooling outputs the upper triangle of a (C x C) matrix.
            in_units = in_channels * (in_channels + 1) // 2
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_units))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_isqrtcovresnet(blocks,
                       conv1_stride=True,
                       model_name=None,
                       pretrained=False,
                       ctx=cpu(),
                       root=os.path.join("~", ".mxnet", "models"),
                       **kwargs):
    """
    Create iSQRT-COV-ResNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    conv1_stride : bool, default True
        Whether to use stride in the first or the second convolution layer in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-stage unit counts for each supported network depth.
    layers_by_blocks = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in layers_by_blocks:
        raise ValueError("Unsupported iSQRT-COV-ResNet with number of blocks: {}".format(blocks))
    layers = layers_by_blocks[blocks]

    init_block_channels = 64
    final_block_channels = 256

    # Depths of 50 and above use bottleneck units with wider stages.
    bottleneck = (blocks >= 50)
    channels_per_layers = [256, 512, 1024, 2048] if bottleneck else [64, 128, 256, 512]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]

    net = iSQRTCOVResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        bottleneck=bottleneck,
        conv1_stride=conv1_stride,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def isqrtcovresnet18(**kwargs):
    """
    iSQRT-COV-ResNet-18 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
    Square Root Normalization,' https://arxiv.org/abs/1712.01034.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_isqrtcovresnet(blocks=18, model_name="isqrtcovresnet18", **kwargs)
def isqrtcovresnet34(**kwargs):
    """
    iSQRT-COV-ResNet-34 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
    Square Root Normalization,' https://arxiv.org/abs/1712.01034.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_isqrtcovresnet(blocks=34, model_name="isqrtcovresnet34", **kwargs)
def isqrtcovresnet50(**kwargs):
    """
    iSQRT-COV-ResNet-50 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
    Square Root Normalization,' https://arxiv.org/abs/1712.01034.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_isqrtcovresnet(blocks=50, model_name="isqrtcovresnet50", **kwargs)
def isqrtcovresnet50b(**kwargs):
    """
    iSQRT-COV-ResNet-50 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
    of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
    https://arxiv.org/abs/1712.01034.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_isqrtcovresnet(blocks=50, conv1_stride=False, model_name="isqrtcovresnet50b", **kwargs)
def isqrtcovresnet101(**kwargs):
    """
    iSQRT-COV-ResNet-101 model from 'Towards Faster Training of Global Covariance Pooling Networks by Iterative Matrix
    Square Root Normalization,' https://arxiv.org/abs/1712.01034.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_isqrtcovresnet(blocks=101, model_name="isqrtcovresnet101", **kwargs)
def isqrtcovresnet101b(**kwargs):
    """
    iSQRT-COV-ResNet-101 model with stride at the second convolution in bottleneck block from 'Towards Faster Training
    of Global Covariance Pooling Networks by Iterative Matrix Square Root Normalization,'
    https://arxiv.org/abs/1712.01034.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_isqrtcovresnet(blocks=101, conv1_stride=False, model_name="isqrtcovresnet101b", **kwargs)
def _test():
    """Smoke test: build each variant, check parameter count, run forward and backward."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_counts = {
        isqrtcovresnet18: 44205096,
        isqrtcovresnet34: 54313256,
        isqrtcovresnet50: 56929832,
        isqrtcovresnet50b: 56929832,
        isqrtcovresnet101: 75921960,
        isqrtcovresnet101b: 75921960,
    }
    models = [
        isqrtcovresnet18,
        isqrtcovresnet34,
        isqrtcovresnet50,
        isqrtcovresnet50b,
        isqrtcovresnet101,
        isqrtcovresnet101b,
    ]

    ctx = mx.cpu()
    for model in models:
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_counts[model]

        # Forward + backward, exercising the custom autograd Functions.
        x = mx.nd.random.randn(14, 3, 224, 224, ctx=ctx)
        x.attach_grad()
        with mx.autograd.record():
            y = net(x)
        y.backward()
        assert y.shape == (14, 1000)
| 17,607 | 35.683333 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shufflenetv2.py | """
ShuffleNet V2 for ImageNet-1K, implemented in Gluon.
Original paper: 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
https://arxiv.org/abs/1807.11164.
"""
__all__ = ['ShuffleNetV2', 'shufflenetv2_wd2', 'shufflenetv2_w1', 'shufflenetv2_w3d2', 'shufflenetv2_w2']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, depthwise_conv3x3, conv1x1_block, conv3x3_block, ChannelShuffle, SEBlock
class ShuffleUnit(HybridBlock):
    """
    ShuffleNetV2 unit: two parallel branches whose outputs are concatenated and
    then channel-shuffled.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    downsample : bool
        Whether to downsample spatially (stride-2 depthwise convs in both branches).
    use_se : bool
        Whether to use SE block.
    use_residual : bool
        Whether to use residual connection.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 downsample,
                 use_se,
                 use_residual,
                 **kwargs):
        super(ShuffleUnit, self).__init__(**kwargs)
        self.downsample = downsample
        self.use_se = use_se
        self.use_residual = use_residual
        # Each branch produces half of the output channels.
        mid_channels = out_channels // 2
        with self.name_scope():
            # Main branch: 1x1 compress -> 3x3 depthwise -> 1x1 expand.
            # When downsampling it consumes the full input; otherwise half the split.
            self.compress_conv1 = conv1x1(
                in_channels=(in_channels if self.downsample else mid_channels),
                out_channels=mid_channels)
            self.compress_bn1 = nn.BatchNorm(in_channels=mid_channels)
            self.dw_conv2 = depthwise_conv3x3(
                channels=mid_channels,
                strides=(2 if self.downsample else 1))
            self.dw_bn2 = nn.BatchNorm(in_channels=mid_channels)
            self.expand_conv3 = conv1x1(
                in_channels=mid_channels,
                out_channels=mid_channels)
            self.expand_bn3 = nn.BatchNorm(in_channels=mid_channels)
            if self.use_se:
                self.se = SEBlock(channels=mid_channels)
            if downsample:
                # Shortcut branch (downsampling units only):
                # stride-2 3x3 depthwise + 1x1 pointwise.
                self.dw_conv4 = depthwise_conv3x3(
                    channels=in_channels,
                    strides=2)
                self.dw_bn4 = nn.BatchNorm(in_channels=in_channels)
                self.expand_conv5 = conv1x1(
                    in_channels=in_channels,
                    out_channels=mid_channels)
                self.expand_bn5 = nn.BatchNorm(in_channels=mid_channels)
            self.activ = nn.Activation("relu")
            self.c_shuffle = ChannelShuffle(
                channels=out_channels,
                groups=2)
    def hybrid_forward(self, F, x):
        if self.downsample:
            # Downsampling unit: both branches see the full input.
            y1 = self.dw_conv4(x)
            y1 = self.dw_bn4(y1)
            y1 = self.expand_conv5(y1)
            y1 = self.expand_bn5(y1)
            y1 = self.activ(y1)
            x2 = x
        else:
            # Regular unit: split channels in half; the first half passes through.
            y1, x2 = F.split(x, axis=1, num_outputs=2)
        y2 = self.compress_conv1(x2)
        y2 = self.compress_bn1(y2)
        y2 = self.activ(y2)
        y2 = self.dw_conv2(y2)
        y2 = self.dw_bn2(y2)
        y2 = self.expand_conv3(y2)
        y2 = self.expand_bn3(y2)
        y2 = self.activ(y2)
        if self.use_se:
            y2 = self.se(y2)
        if self.use_residual and not self.downsample:
            y2 = y2 + x2
        x = F.concat(y1, y2, dim=1)
        # Shuffle channels across the two groups so information mixes between branches.
        x = self.c_shuffle(x)
        return x
class ShuffleInitBlock(HybridBlock):
    """
    ShuffleNetV2 specific initial block: stride-2 3x3 conv followed by stride-2
    3x3 max-pooling (4x total spatial reduction).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 **kwargs):
        super(ShuffleInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2)
            # No padding; ceil_mode=True rounds the output size up instead of
            # truncating.
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0,
                ceil_mode=True)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        x = self.pool(x)
        return x
class ShuffleNetV2(HybridBlock):
    """
    ShuffleNetV2 model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    use_se : bool, default False
        Whether to use SE block.
    use_residual : bool, default False
        Whether to use residual connections.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 use_se=False,
                 use_residual=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(ShuffleNetV2, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(ShuffleInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # The first unit of every stage downsamples.
                        downsample = (j == 0)
                        stage.add(ShuffleUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            downsample=downsample,
                            use_se=use_se,
                            use_residual=use_residual))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 1x1 conv widens features before pooling.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels))
            in_channels = final_block_channels
            # Fixed 7x7 pooling assumes a 7x7 final feature map,
            # i.e. the default in_size=(224, 224).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_shufflenetv2(width_scale,
                     model_name=None,
                     pretrained=False,
                     ctx=cpu(),
                     root=os.path.join("~", ".mxnet", "models"),
                     **kwargs):
    """
    Create ShuffleNetV2 model with specific parameters.

    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 24
    final_block_channels = 1024
    stage_depths = [4, 8, 4]
    stage_widths = [116, 232, 464]

    # One sub-list of per-unit output widths per stage.
    channels = [[width] * depth for (width, depth) in zip(stage_widths, stage_depths)]

    if width_scale != 1.0:
        channels = [[int(width * width_scale) for width in stage] for stage in channels]
        if width_scale > 1.5:
            # The widest variants also scale the final 1x1 conv.
            final_block_channels = int(final_block_channels * width_scale)

    net = ShuffleNetV2(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def shufflenetv2_wd2(**kwargs):
    """
    ShuffleNetV2 0.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    # 12/29 scales the base stage widths [116, 232, 464] to exactly [48, 96, 192].
    return get_shufflenetv2(width_scale=(12.0 / 29.0), model_name="shufflenetv2_wd2", **kwargs)
def shufflenetv2_w1(**kwargs):
    """
    ShuffleNetV2 1x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    return get_shufflenetv2(width_scale=1.0, model_name="shufflenetv2_w1", **kwargs)
def shufflenetv2_w3d2(**kwargs):
    """
    ShuffleNetV2 1.5x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    # 44/29 scales the base stage widths [116, 232, 464] to exactly [176, 352, 704].
    return get_shufflenetv2(width_scale=(44.0 / 29.0), model_name="shufflenetv2_w3d2", **kwargs)
def shufflenetv2_w2(**kwargs):
    """
    ShuffleNetV2 2x model from 'ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design,'
    https://arxiv.org/abs/1807.11164.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    HybridBlock
        Resulted network.
    """
    # 61/29 scales the base stage widths [116, 232, 464] to exactly [244, 488, 976].
    return get_shufflenetv2(width_scale=(61.0 / 29.0), model_name="shufflenetv2_w2", **kwargs)
def _test():
    """Smoke test: build each variant, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected trainable-parameter counts per model factory.
    expected_counts = {
        shufflenetv2_wd2: 1366792,
        shufflenetv2_w1: 2278604,
        shufflenetv2_w3d2: 4406098,
        shufflenetv2_w2: 7601686,
    }
    models = [
        shufflenetv2_wd2,
        shufflenetv2_w1,
        shufflenetv2_w3d2,
        shufflenetv2_w2,
    ]

    ctx = mx.cpu()
    for model in models:
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert weight_count == expected_counts[model]

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert y.shape == (1, 1000)
| 12,524 | 32.4 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fishnet.py | """
FishNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, InterpolationBlock
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
                    channels_per_group):
    """
    Channel squeeze operation.

    Splits the channel axis into (channels_per_group, groups) and sums over the
    groups sub-axis, leaving channels_per_group output channels.

    MXNet reshape special values used below: 0 keeps the batch dim as-is, -4
    splits the channel dim into the next two sizes (channels_per_group and -1,
    the latter inferred), and -2 copies the remaining (spatial) dims.

    Parameters:
    ----------
    x : NDArray
        Input tensor.
    channels_per_group : int
        Number of channels per group.
    Returns:
    -------
    NDArray
        Resulted tensor.
    """
    return x.reshape((0, -4, channels_per_group, -1, -2)).sum(axis=2)
class ChannelSqueeze(HybridBlock):
    """
    Channel squeeze layer. This is a wrapper over the same operation. It is designed to save the number of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups,
                 **kwargs):
        super(ChannelSqueeze, self).__init__(**kwargs)
        # Channels must divide evenly into the requested groups.
        assert (channels % groups == 0)
        self.channels_per_group = channels // groups
    def hybrid_forward(self, F, x):
        return channel_squeeze(x, self.channels_per_group)
class PreSEAttBlock(HybridBlock):
    """
    FishNet specific Squeeze-and-Excitation attention block: pre-activation
    (BN + ReLU), global average pooling, and a two-layer 1x1 bottleneck ending
    in a sigmoid gate.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    reduction : int, default 16
        Squeeze reduction value.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 reduction=16,
                 **kwargs):
        super(PreSEAttBlock, self).__init__(**kwargs)
        mid_channels = out_channels // reduction
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.relu = nn.Activation("relu")
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_channels,
                use_bias=True)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)
            self.sigmoid = nn.Activation("sigmoid")

    def hybrid_forward(self, F, x):
        w = self.relu(self.bn(x))
        # Squeeze: global spatial average -> (batch, channels, 1, 1).
        w = F.contrib.AdaptiveAvgPooling2D(w, output_size=1)
        # Excite: bottleneck MLP, then sigmoid gating weights.
        w = self.conv2(self.relu(self.conv1(w)))
        return self.sigmoid(w)
class FishBottleneck(HybridBlock):
    """
    FishNet bottleneck block for residual unit: three pre-activation conv blocks
    (1x1 reduce -> 3x3 -> 1x1 expand) with a 4x channel bottleneck.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 bn_use_global_stats,
                 **kwargs):
        super(FishBottleneck, self).__init__(**kwargs)
        # Bottleneck ratio of 4.
        mid_channels = out_channels // 4
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            # Padding equals dilation so the 3x3 conv preserves spatial size
            # (for strides=1).
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                padding=dilation,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class FishBlock(HybridBlock):
    """
    FishNet block: a bottleneck residual unit whose identity path is either a
    channel squeeze, a 1x1 projection, or a plain shortcut.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    squeeze : bool, default False
        Whether to use a channel squeeze operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 dilation=1,
                 bn_use_global_stats=False,
                 squeeze=False,
                 **kwargs):
        super(FishBlock, self).__init__(**kwargs)
        self.squeeze = squeeze
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = FishBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats)
            if self.squeeze:
                # Squeeze path requires the output to be exactly half the input
                # channels: the identity is formed by summing pairs of channels.
                assert (in_channels // 2 == out_channels)
                self.c_squeeze = ChannelSqueeze(
                    channels=in_channels,
                    groups=2)
            elif self.resize_identity:
                # Project the identity when channels or spatial size change.
                self.identity_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        if self.squeeze:
            identity = self.c_squeeze(x)
        elif self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        return x
class DownUnit(HybridBlock):
    """
    FishNet down unit: a chain of FishBlocks followed by 2x2 max-pooling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(DownUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for out_channels in out_channels_list:
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels
            self.pool = nn.MaxPool2D(
                pool_size=2,
                strides=2)

    def hybrid_forward(self, F, x):
        # Residual blocks, then 2x spatial downsampling.
        return self.pool(self.blocks(x))
class UpUnit(HybridBlock):
    """
    FishNet up unit: a chain of FishBlocks followed by 2x upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 dilation=1,
                 bn_use_global_stats=False,
                 **kwargs):
        super(UpUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(out_channels_list):
                # When dilated, the first block's identity path is a channel
                # squeeze (halves the channel count instead of projecting).
                squeeze = (dilation > 1) and (i == 0)
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    squeeze=squeeze))
                in_channels = out_channels
            # 2x upsampling (bilinear=False; see common.InterpolationBlock).
            self.upsample = InterpolationBlock(scale_factor=2, bilinear=False)
    def hybrid_forward(self, F, x):
        x = self.blocks(x)
        x = self.upsample(x)
        return x
class SkipUnit(HybridBlock):
    """
    FishNet skip connection unit: a plain chain of FishBlocks (no resampling).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(SkipUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for out_channels in out_channels_list:
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels

    def hybrid_forward(self, F, x):
        return self.blocks(x)
class SkipAttUnit(HybridBlock):
    """
    FishNet skip connection unit with attention block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(SkipAttUnit, self).__init__(**kwargs)
        # 1x1 bottleneck: halve channels, then expand to 2x the original.
        mid_channels1 = in_channels // 2
        mid_channels2 = 2 * in_channels
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels1,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv1x1_block(
                in_channels=mid_channels1,
                out_channels=mid_channels2,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats)
            in_channels = mid_channels2
            # SE-style gate computed from the expanded features; its output
            # width matches the last block's channels.
            self.se = PreSEAttBlock(
                in_channels=mid_channels2,
                out_channels=out_channels_list[-1],
                bn_use_global_stats=bn_use_global_stats)
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(out_channels_list):
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        w = self.se(x)
        x = self.blocks(x)
        # Attention: x * w + w (i.e. (x + 1) * w), broadcast over spatial dims.
        x = F.broadcast_add(F.broadcast_mul(x, w), w)
        return x
class FishFinalBlock(HybridBlock):
    """
    FishNet final block: a 1x1 pre-activation conv that halves the channel
    count, followed by a final BN + ReLU activation.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(FishFinalBlock, self).__init__(**kwargs)
        mid_channels = in_channels // 2
        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            # Pre-activation networks need a trailing BN + ReLU before the head.
            self.preactiv = PreResActivation(
                in_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.preactiv(x)
        return x
class FishNet(HybridBlock):
    """
    FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    direct_channels : list of list of list of int
        Number of output channels for each unit along the straight path.
    skip_channels : list of list of list of int
        Number of output channels for each skip connection unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 direct_channels,
                 skip_channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(FishNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        # Three legs of the hourglass: tail (down1), body (up), head (down2).
        depth = len(direct_channels[0])
        down1_channels = direct_channels[0]
        up_channels = direct_channels[1]
        down2_channels = direct_channels[2]
        skip1_channels = skip_channels[0]
        skip2_channels = skip_channels[1]

        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels

            # Tail: downsampling units plus skip units saved for the body.
            down1_seq = nn.HybridSequential(prefix="")
            skip1_seq = nn.HybridSequential(prefix="")
            for i in range(depth + 1):
                skip1_channels_list = skip1_channels[i]
                if i < depth:
                    skip1_seq.add(SkipUnit(
                        in_channels=in_channels,
                        out_channels_list=skip1_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    down1_channels_list = down1_channels[i]
                    down1_seq.add(DownUnit(
                        in_channels=in_channels,
                        out_channels_list=down1_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = down1_channels_list[-1]
                else:
                    # Deepest point: attention-based skip instead of a down unit.
                    skip1_seq.add(SkipAttUnit(
                        in_channels=in_channels,
                        out_channels_list=skip1_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = skip1_channels_list[-1]

            # Body: upsampling units (with growing dilation) fused with tail skips.
            up_seq = nn.HybridSequential(prefix="")
            skip2_seq = nn.HybridSequential(prefix="")
            for i in range(depth + 1):
                skip2_channels_list = skip2_channels[i]
                if i > 0:
                    # Channel count grows by the concatenated tail skip output.
                    in_channels += skip1_channels[depth - i][-1]
                if i < depth:
                    skip2_seq.add(SkipUnit(
                        in_channels=in_channels,
                        out_channels_list=skip2_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    up_channels_list = up_channels[i]
                    dilation = 2 ** i
                    up_seq.add(UpUnit(
                        in_channels=in_channels,
                        out_channels_list=up_channels_list,
                        dilation=dilation,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = up_channels_list[-1]
                else:
                    skip2_seq.add(Identity())

            # Head: downsampling units fused with body skips.
            down2_seq = nn.HybridSequential(prefix="")
            for i in range(depth):
                down2_channels_list = down2_channels[i]
                down2_seq.add(DownUnit(
                    in_channels=in_channels,
                    out_channels_list=down2_channels_list,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]

            self.features.add(SesquialteralHourglass(
                down1_seq=down1_seq,
                skip1_seq=skip1_seq,
                up_seq=up_seq,
                skip2_seq=skip2_seq,
                down2_seq=down2_seq))
            self.features.add(FishFinalBlock(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = in_channels // 2
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            # 1x1 conv classifier applied to the pooled feature map.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True))
            self.output.add(nn.Flatten())

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_fishnet(blocks,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create FishNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if blocks == 99:
        direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
        skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
    elif blocks == 150:
        direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
        skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
    else:
        raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))

    direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
    skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]

    def expand(widths_per_leg, counts_per_leg):
        # Turn each (width, repeat-count) pair into a per-unit channel list.
        return [[[width] * count for (width, count) in zip(widths, counts)]
                for (widths, counts) in zip(widths_per_leg, counts_per_leg)]

    direct_channels = expand(direct_channels_per_layers, direct_layers)
    skip_channels = expand(skip_channels_per_layers, skip_layers)
    init_block_channels = 64

    net = FishNet(
        direct_channels=direct_channels,
        skip_channels=skip_channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def fishnet99(**kwargs):
    """
    Constructs the FishNet-99 classification network ('FishNet: A Versatile Backbone for Image, Region, and Pixel
    Level Prediction,' http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_fishnet(blocks=99, model_name="fishnet99", **kwargs)
    return net
def fishnet150(**kwargs):
    """
    Constructs the FishNet-150 classification network ('FishNet: A Versatile Backbone for Image, Region, and Pixel
    Level Prediction,' http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_fishnet(blocks=150, model_name="fishnet150", **kwargs)
    return net
def _test():
    """Smoke-test: build each FishNet variant, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    model_list = [
        (fishnet99, 16628904),
        (fishnet150, 24959400),
    ]

    for constructor, expected_params in model_list:
        net = constructor(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        param_total = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            param_total += np.prod(param.shape)
        print("m={}, {}".format(constructor.__name__, param_total))
        assert (param_total == expected_params)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 23,458 | 33.097384 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/hrnet.py | """
HRNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Deep High-Resolution Representation Learning for Visual Recognition,'
https://arxiv.org/abs/1908.07919.
"""
__all__ = ['hrnet_w18_small_v1', 'hrnet_w18_small_v2', 'hrnetv2_w18', 'hrnetv2_w30', 'hrnetv2_w32', 'hrnetv2_w40',
'hrnetv2_w44', 'hrnetv2_w48', 'hrnetv2_w64']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import conv1x1_block, conv3x3_block, DualPathSequential
from .resnet import ResUnit
class UpSamplingBlock(HybridBlock):
    """
    HRNet-specific upsampling block: a 1x1 channel projection followed by
    nearest-neighbour upsampling by a fixed integer factor.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    scale_factor : int
        Multiplier for spatial size.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 scale_factor,
                 **kwargs):
        super(UpSamplingBlock, self).__init__(**kwargs)
        self.scale_factor = scale_factor
        with self.name_scope():
            self.conv = conv1x1_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=1,
                activation=None,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        y = self.conv(x)
        return F.UpSampling(y, scale=self.scale_factor, sample_type="nearest")
class HRBlock(HybridBlock):
    """
    HRNet block: parallel multi-resolution branches followed by an all-to-all
    fusion step that exchanges information between resolutions.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblock.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_branches,
                 num_subblocks,
                 bn_use_global_stats,
                 **kwargs):
        super(HRBlock, self).__init__(**kwargs)
        self.in_channels_list = in_channels_list
        self.num_branches = num_branches

        with self.name_scope():
            # One residual chain per resolution branch.
            self.branches = nn.HybridSequential(prefix="")
            for i in range(num_branches):
                layers = nn.HybridSequential(prefix="branch{}_".format(i + 1))
                in_channels_i = self.in_channels_list[i]
                out_channels_i = out_channels_list[i]
                for j in range(num_subblocks[i]):
                    layers.add(ResUnit(
                        in_channels=in_channels_i,
                        out_channels=out_channels_i,
                        strides=1,
                        bottleneck=False,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels_i = out_channels_i
                # NOTE: mutates the shared channel bookkeeping in place; HRStage
                # reads this attribute back after construction.
                self.in_channels_list[i] = out_channels_i
                self.branches.add(layers)

            if num_branches > 1:
                # fuse_layers[i][j] maps branch j's output to branch i's
                # resolution/width: upsample if j > i, identity if j == i,
                # strided conv chain if j < i.
                self.fuse_layers = nn.HybridSequential(prefix="")
                for i in range(num_branches):
                    fuse_layer = nn.HybridSequential(prefix="fuselayer{}_".format(i + 1))
                    with fuse_layer.name_scope():
                        for j in range(num_branches):
                            if j > i:
                                fuse_layer.add(UpSamplingBlock(
                                    in_channels=in_channels_list[j],
                                    out_channels=in_channels_list[i],
                                    bn_use_global_stats=bn_use_global_stats,
                                    scale_factor=2 ** (j - i)))
                            elif j == i:
                                fuse_layer.add(Identity())
                            else:
                                conv3x3_seq = nn.HybridSequential(prefix="conv3x3seq{}_".format(j + 1))
                                with conv3x3_seq.name_scope():
                                    for k in range(i - j):
                                        if k == i - j - 1:
                                            # Final downsampling step also switches
                                            # the width; no activation here.
                                            conv3x3_seq.add(conv3x3_block(
                                                in_channels=in_channels_list[j],
                                                out_channels=in_channels_list[i],
                                                strides=2,
                                                activation=None,
                                                bn_use_global_stats=bn_use_global_stats))
                                        else:
                                            conv3x3_seq.add(conv3x3_block(
                                                in_channels=in_channels_list[j],
                                                out_channels=in_channels_list[j],
                                                strides=2,
                                                bn_use_global_stats=bn_use_global_stats))
                                fuse_layer.add(conv3x3_seq)
                    self.fuse_layers.add(fuse_layer)
                self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x0, x):
        # x0 is the untouched pass-through tensor of the dual-path scheme;
        # x is the list of per-branch tensors.
        for i in range(self.num_branches):
            x[i] = self.branches[i](x[i])

        if self.num_branches == 1:
            return x

        # Fuse: each output branch sums the (resampled) contributions of all
        # input branches, then applies ReLU.
        x_fuse = []
        for i in range(len(self.fuse_layers)):
            y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x[j]
                else:
                    y = y + self.fuse_layers[i][j](x[j])
            x_fuse.append(self.activ(y))

        return x0, x_fuse
class HRStage(HybridBlock):
    """
    HRNet stage block: a transition that adapts/extends the set of branches,
    followed by a sequence of HRBlock modules.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of output channels from the previous layer.
    out_channels_list : list of int
        Number of output channels in the current layer.
    num_modules : int
        Number of modules.
    num_branches : int
        Number of branches.
    num_subblocks : list of int
        Number of subblocks.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 num_modules,
                 num_branches,
                 num_subblocks,
                 bn_use_global_stats,
                 **kwargs):
        super(HRStage, self).__init__(**kwargs)
        self.branches = num_branches
        self.in_channels_list = out_channels_list
        in_branches = len(in_channels_list)
        out_branches = len(out_channels_list)

        with self.name_scope():
            # Transition: existing branches get a width-matching conv (or
            # identity); new branches are grown from the last input branch by
            # strided conv chains.
            self.transition = nn.HybridSequential(prefix="")
            for i in range(out_branches):
                if i < in_branches:
                    if out_channels_list[i] != in_channels_list[i]:
                        self.transition.add(conv3x3_block(
                            in_channels=in_channels_list[i],
                            out_channels=out_channels_list[i],
                            strides=1,
                            bn_use_global_stats=bn_use_global_stats))
                    else:
                        self.transition.add(Identity())
                else:
                    conv3x3_seq = nn.HybridSequential(prefix="conv3x3_seq{}_".format(i + 1))
                    for j in range(i + 1 - in_branches):
                        in_channels_i = in_channels_list[-1]
                        # Only the last conv in the chain switches to the target width.
                        out_channels_i = out_channels_list[i] if j == i - in_branches else in_channels_i
                        conv3x3_seq.add(conv3x3_block(
                            in_channels=in_channels_i,
                            out_channels=out_channels_i,
                            strides=2,
                            bn_use_global_stats=bn_use_global_stats))
                    self.transition.add(conv3x3_seq)

            self.layers = DualPathSequential(prefix="")
            for i in range(num_modules):
                self.layers.add(HRBlock(
                    in_channels_list=self.in_channels_list,
                    out_channels_list=out_channels_list,
                    num_branches=num_branches,
                    num_subblocks=num_subblocks,
                    bn_use_global_stats=bn_use_global_stats))
                # HRBlock updates its channel bookkeeping in place; propagate it.
                self.in_channels_list = self.layers[-1].in_channels_list

    def hybrid_forward(self, F, x0, x):
        x_list = []
        for j in range(self.branches):
            if not isinstance(self.transition[j], Identity):
                # NOTE(review): non-identity transitions always consume the last
                # input branch; assumes width changes only occur for newly
                # created branches — confirm against the configs in get_hrnet.
                x_list.append(self.transition[j](x[-1] if type(x) in (list, tuple) else x))
            else:
                x_list_j = x[j] if type(x) in (list, tuple) else x
                x_list.append(x_list_j)

        _, y_list = self.layers(x0, x_list)
        return x0, y_list
class HRInitBlock(HybridBlock):
    """
    HRNet stem block: two stride-2 3x3 convolutions (4x spatial reduction)
    followed by a chain of bottleneck residual units.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels : int
        Number of middle channels.
    num_subblocks : int
        Number of subblocks.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels,
                 num_subblocks,
                 bn_use_global_stats,
                 **kwargs):
        super(HRInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats)
            self.subblocks = nn.HybridSequential(prefix="")
            block_in_channels = mid_channels
            for _ in range(num_subblocks):
                self.subblocks.add(ResUnit(
                    in_channels=block_in_channels,
                    out_channels=out_channels,
                    strides=1,
                    bottleneck=True,
                    bn_use_global_stats=bn_use_global_stats))
                block_in_channels = out_channels

    def hybrid_forward(self, F, x):
        y = self.conv1(x)
        y = self.conv2(y)
        return self.subblocks(y)
class HRFinalBlock(HybridBlock):
    """
    HRNet-specific final block: widens each branch with a bottleneck unit and
    merges the branches top-down via strided convolutions and addition.

    Parameters:
    ----------
    in_channels_list : list of int
        Number of input channels per stage.
    out_channels_list : list of int
        Number of output channels per stage.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels_list,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(HRFinalBlock, self).__init__(**kwargs)
        with self.name_scope():
            # One widening bottleneck unit per input branch.
            self.inc_blocks = nn.HybridSequential(prefix="")
            for i, in_channels_i in enumerate(in_channels_list):
                self.inc_blocks.add(ResUnit(
                    in_channels=in_channels_i,
                    out_channels=out_channels_list[i],
                    strides=1,
                    bottleneck=True,
                    bn_use_global_stats=bn_use_global_stats))
            # Stride-2 convs carrying the running sum down to the next branch's
            # resolution.
            self.down_blocks = nn.HybridSequential(prefix="")
            for i in range(len(in_channels_list) - 1):
                self.down_blocks.add(conv3x3_block(
                    in_channels=out_channels_list[i],
                    out_channels=out_channels_list[i + 1],
                    strides=2,
                    use_bias=True,
                    bn_use_global_stats=bn_use_global_stats))
            self.final_layer = conv1x1_block(
                in_channels=1024,
                out_channels=2048,
                strides=1,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x0, x):
        # Fold branches from the highest resolution down: widen branch i+1 and
        # add the downsampled running sum.
        y = self.inc_blocks[0](x[0])
        for i in range(len(self.down_blocks)):
            y = self.inc_blocks[i + 1](x[i + 1]) + self.down_blocks[i](y)
        y = self.final_layer(y)
        # Duplicate the result to satisfy the dual-path interface.
        return y, y
class HRNet(HybridBlock):
    """
    HRNet model from 'Deep High-Resolution Representation Learning for Visual Recognition,'
    https://arxiv.org/abs/1908.07919.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    init_num_subblocks : int
        Number of subblocks in the initial unit.
    num_modules : list of int
        Number of modules per stage.
    num_subblocks : list of list of int
        Number of subblocks per stage.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 init_num_subblocks,
                 num_modules,
                 num_subblocks,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(HRNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Branch counts for the three multi-resolution stages.
        self.branches = [2, 3, 4]

        with self.name_scope():
            # Dual-path container: the first tensor passes through untouched,
            # the second carries the actual features.
            self.features = DualPathSequential(
                first_ordinals=1,
                last_ordinals=1,
                dual_path_scheme_ordinal=(lambda block, x1, x2: (x1, block(x2))),
                prefix="")
            self.features.add(HRInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                mid_channels=64,
                num_subblocks=init_num_subblocks,
                bn_use_global_stats=bn_use_global_stats))
            in_channels_list = [init_block_channels]
            for i in range(len(self.branches)):
                self.features.add(HRStage(
                    in_channels_list=in_channels_list,
                    out_channels_list=channels[i],
                    num_modules=num_modules[i],
                    num_branches=self.branches[i],
                    num_subblocks=num_subblocks[i],
                    bn_use_global_stats=bn_use_global_stats))
                # Each stage reports its (possibly updated) output widths.
                in_channels_list = self.features[-1].in_channels_list
            self.features.add(HRFinalBlock(
                in_channels_list=in_channels_list,
                out_channels_list=[128, 256, 512, 1024],
                bn_use_global_stats=bn_use_global_stats))
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=2048))

    def hybrid_forward(self, F, x):
        _, x = self.features(x, x)
        x = self.output(x)
        return x
def get_hrnet(version,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create HRNet model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of HRNet ('w18s1', 'w18s2', 'w18', 'w30', 'w32', 'w40', 'w44', 'w48' or 'w64').
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    if version == "w18s1":
        init_block_channels = 128
        init_num_subblocks = 1
        channels = [[16, 32], [16, 32, 64], [16, 32, 64, 128]]
        num_modules = [1, 1, 1]
    elif version == "w18s2":
        init_block_channels = 256
        init_num_subblocks = 2
        channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]]
        num_modules = [1, 3, 2]
    elif version == "w18":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[18, 36], [18, 36, 72], [18, 36, 72, 144]]
        num_modules = [1, 4, 3]
    elif version == "w30":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[30, 60], [30, 60, 120], [30, 60, 120, 240]]
        num_modules = [1, 4, 3]
    elif version == "w32":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[32, 64], [32, 64, 128], [32, 64, 128, 256]]
        num_modules = [1, 4, 3]
    elif version == "w40":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[40, 80], [40, 80, 160], [40, 80, 160, 320]]
        num_modules = [1, 4, 3]
    elif version == "w44":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[44, 88], [44, 88, 176], [44, 88, 176, 352]]
        num_modules = [1, 4, 3]
    elif version == "w48":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[48, 96], [48, 96, 192], [48, 96, 192, 384]]
        num_modules = [1, 4, 3]
    elif version == "w64":
        init_block_channels = 256
        init_num_subblocks = 4
        channels = [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
        num_modules = [1, 4, 3]
    else:
        raise ValueError("Unsupported HRNet version {}".format(version))

    # Every branch uses at least two residual subblocks per HRBlock.
    num_subblocks = [[max(2, init_num_subblocks)] * len(ci) for ci in channels]

    net = HRNet(
        channels=channels,
        init_block_channels=init_block_channels,
        init_num_subblocks=init_num_subblocks,
        num_modules=num_modules,
        num_subblocks=num_subblocks,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def hrnet_w18_small_v1(**kwargs):
    """
    Constructs the HRNet-W18 Small V1 classification network ('Deep High-Resolution Representation Learning for
    Visual Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w18s1", model_name="hrnet_w18_small_v1", **kwargs)
    return net
def hrnet_w18_small_v2(**kwargs):
    """
    Constructs the HRNet-W18 Small V2 classification network ('Deep High-Resolution Representation Learning for
    Visual Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w18s2", model_name="hrnet_w18_small_v2", **kwargs)
    return net
def hrnetv2_w18(**kwargs):
    """
    Constructs the HRNetV2-W18 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w18", model_name="hrnetv2_w18", **kwargs)
    return net
def hrnetv2_w30(**kwargs):
    """
    Constructs the HRNetV2-W30 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w30", model_name="hrnetv2_w30", **kwargs)
    return net
def hrnetv2_w32(**kwargs):
    """
    Constructs the HRNetV2-W32 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w32", model_name="hrnetv2_w32", **kwargs)
    return net
def hrnetv2_w40(**kwargs):
    """
    Constructs the HRNetV2-W40 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w40", model_name="hrnetv2_w40", **kwargs)
    return net
def hrnetv2_w44(**kwargs):
    """
    Constructs the HRNetV2-W44 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w44", model_name="hrnetv2_w44", **kwargs)
    return net
def hrnetv2_w48(**kwargs):
    """
    Constructs the HRNetV2-W48 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w48", model_name="hrnetv2_w48", **kwargs)
    return net
def hrnetv2_w64(**kwargs):
    """
    Constructs the HRNetV2-W64 classification network ('Deep High-Resolution Representation Learning for Visual
    Recognition,' https://arxiv.org/abs/1908.07919).

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = get_hrnet(version="w64", model_name="hrnetv2_w64", **kwargs)
    return net
def _test():
    """Smoke-test: build each HRNet variant, check parameter count and output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False
    model_list = [
        (hrnet_w18_small_v1, 13187464),
        (hrnet_w18_small_v2, 15597464),
        (hrnetv2_w18, 21299004),
        (hrnetv2_w30, 37712220),
        (hrnetv2_w32, 41232680),
        (hrnetv2_w40, 57557160),
        (hrnetv2_w44, 67064984),
        (hrnetv2_w48, 77469864),
        (hrnetv2_w64, 128059944),
    ]

    for constructor, expected_params in model_list:
        net = constructor(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        param_total = 0
        for param in net.collect_params().values():
            if (param.shape is None) or (not param._differentiable):
                continue
            param_total += np.prod(param.shape)
        print("m={}, {}".format(constructor.__name__, param_total))
        assert (param_total == expected_params)

        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 26,230 | 35.381415 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/fcn8sd.py | """
FCN-8s(d) for image segmentation, implemented in Gluon.
Original paper: 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
"""
__all__ = ['FCN8sd', 'fcn8sd_resnetd50b_voc', 'fcn8sd_resnetd101b_voc', 'fcn8sd_resnetd50b_coco',
'fcn8sd_resnetd101b_coco', 'fcn8sd_resnetd50b_ade20k', 'fcn8sd_resnetd101b_ade20k',
'fcn8sd_resnetd50b_cityscapes', 'fcn8sd_resnetd101b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv3x3_block
from .resnetd import resnetd50b, resnetd101b
class FCNFinalBlock(HybridBlock):
    """
    FCN-8s(d) segmentation head: a 3x3 bottleneck convolution, dropout, a 1x1
    classifier convolution, and bilinear upsampling to the requested size.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 **kwargs):
        super(FCNFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
            self.conv2 = conv1x1(
                in_channels=mid_channels,
                out_channels=out_channels,
                use_bias=True)

    def hybrid_forward(self, F, x, out_size):
        y = self.conv2(self.dropout(self.conv1(x)))
        return F.contrib.BilinearResize2D(y, height=out_size[0], width=out_size[1])
class FCN8sd(HybridBlock):
    """
    FCN-8s(d) model from 'Fully Convolutional Networks for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.
    It is an experimental model mixed FCN-8s and PSPNet.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    backbone_out_channels : int, default 2048
        Number of output channels from feature extractor.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbone,
                 backbone_out_channels=2048,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 **kwargs):
        super(FCN8sd, self).__init__(**kwargs)
        assert (in_channels > 0)
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size

        with self.name_scope():
            self.backbone = backbone
            pool_out_channels = backbone_out_channels
            self.final_block = FCNFinalBlock(
                in_channels=pool_out_channels,
                out_channels=classes)
            if self.aux:
                # Auxiliary head fed by the backbone's mid-level (bend) output.
                aux_out_channels = backbone_out_channels // 2
                self.aux_block = FCNFinalBlock(
                    in_channels=aux_out_channels,
                    out_channels=classes)

    def hybrid_forward(self, F, x):
        # NOTE(review): reading x.shape only works in imperative mode;
        # fixed_size should be True when hybridizing — confirm.
        in_size = self.in_size if self.fixed_size else x.shape[2:]
        # The backbone returns (final features, auxiliary bend features).
        x, y = self.backbone(x)
        x = self.final_block(x, in_size)
        if self.aux:
            y = self.aux_block(y, in_size)
            return x, y
        else:
            return x
def get_fcn8sd(backbone,
               classes,
               aux=False,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Build an FCN-8s(d) network around the given feature extractor and
    optionally load pretrained weights.

    Parameters:
    ----------
    backbone : nn.Sequential
        Feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = FCN8sd(
        backbone=backbone,
        classes=classes,
        aux=aux,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        model_file_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(
            filename=model_file_path,
            ctx=ctx,
            ignore_extra=True)

    return net
def fcn8sd_resnetd50b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-50b backbone for Pascal VOC, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_voc", **kwargs)
def fcn8sd_resnetd101b_voc(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-101b backbone for Pascal VOC, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_voc", **kwargs)
def fcn8sd_resnetd50b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-50b backbone for COCO, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_coco", **kwargs)
def fcn8sd_resnetd101b_coco(pretrained_backbone=False, classes=21, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-101b backbone for COCO, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 21
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_coco", **kwargs)
def fcn8sd_resnetd50b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-50b backbone for ADE20K, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_ade20k", **kwargs)
def fcn8sd_resnetd101b_ade20k(pretrained_backbone=False, classes=150, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-101b backbone for ADE20K, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 150
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_ade20k", **kwargs)
def fcn8sd_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-50b backbone for Cityscapes, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd50b_cityscapes", **kwargs)
def fcn8sd_resnetd101b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    FCN-8s(d) segmentation model with a ResNet(D)-101b backbone for Cityscapes, from 'Fully Convolutional Networks
    for Semantic Segmentation,' https://arxiv.org/abs/1411.4038.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load pretrained weights into the feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for the whole model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the feature extractor; the FCN head takes its place.
    features = resnetd101b(pretrained=pretrained_backbone, ordinary_init=False, bends=(3,)).features
    return get_fcn8sd(backbone=features[:-1], classes=classes, aux=aux,
                      model_name="fcn8sd_resnetd101b_cityscapes", **kwargs)
def _test():
    """Smoke-test every FCN-8s(d) variant: parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx
    in_size = (480, 480)
    aux = False
    pretrained = False
    # (constructor, number of segmentation classes)
    models = [
        (fcn8sd_resnetd50b_voc, 21),
        (fcn8sd_resnetd101b_voc, 21),
        (fcn8sd_resnetd50b_coco, 21),
        (fcn8sd_resnetd101b_coco, 21),
        (fcn8sd_resnetd50b_ade20k, 150),
        (fcn8sd_resnetd101b_ade20k, 150),
        (fcn8sd_resnetd50b_cityscapes, 19),
        (fcn8sd_resnetd101b_cityscapes, 19),
    ]
    # Regression table of trainable-parameter counts: model -> (with aux, without aux).
    expected_weight_counts = {
        fcn8sd_resnetd50b_voc: (35445994, 33080789),
        fcn8sd_resnetd101b_voc: (54438122, 52072917),
        fcn8sd_resnetd50b_coco: (35445994, 33080789),
        fcn8sd_resnetd101b_coco: (54438122, 52072917),
        fcn8sd_resnetd50b_ade20k: (35545324, 33146966),
        fcn8sd_resnetd101b_ade20k: (54537452, 52139094),
        fcn8sd_resnetd50b_cityscapes: (35444454, 33079763),
        fcn8sd_resnetd101b_cityscapes: (54436582, 52071891),
    }
    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, aux=aux)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_counts[model][0 if aux else 1])
        # The score map must match the input batch/spatial size and the class count.
        x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
# Run the smoke tests when this module is executed directly.
if __name__ == "__main__":
    _test()
| 16,570 | 38.267773 | 120 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/selecsls.py | """
SelecSLS for ImageNet-1K, implemented in Gluon.
Original paper: 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
https://arxiv.org/abs/1907.00837.
"""
__all__ = ['SelecSLS', 'selecsls42', 'selecsls42b', 'selecsls60', 'selecsls60b', 'selecsls84']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, DualPathSequential
class SelecSLSBlock(HybridBlock):
    """
    SelecSLS block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(SelecSLSBlock, self).__init__(**kwargs)
        # 1x1 expansion to twice the output width, then a 3x3 reduction.
        mid_channels = 2 * out_channels
        with self.name_scope():
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        """Apply the 1x1 then 3x3 convolution blocks."""
        x = self.conv1(x)
        x = self.conv2(x)
        return x
class SelecSLSUnit(HybridBlock):
    """
    SelecSLS unit.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    skip_channels : int
        Number of skipped channels.
    mid_channels : int
        Number of middle channels.
    strides : int or tuple/list of 2 int
        Strides of the branch convolution layers.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 skip_channels,
                 mid_channels,
                 strides,
                 bn_use_global_stats,
                 **kwargs):
        super(SelecSLSUnit, self).__init__(**kwargs)
        # A strided unit starts a new cross-stage skip chain instead of consuming one.
        self.resize = (strides == 2)
        mid2_channels = mid_channels // 2
        # Concat width: branch1 (mid) + branch2 (mid/2) + branch3 (mid/2), plus the
        # skip input when the unit is not strided.
        last_channels = 2 * mid_channels + (skip_channels if strides == 1 else 0)
        with self.name_scope():
            self.branch1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            self.branch2 = SelecSLSBlock(
                in_channels=mid_channels,
                out_channels=mid2_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.branch3 = SelecSLSBlock(
                in_channels=mid2_channels,
                out_channels=mid2_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.last_conv = conv1x1_block(
                in_channels=last_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x, x0=None):
        """Return an (output, skip) pair for chaining through DualPathSequential."""
        x1 = self.branch1(x)
        x2 = self.branch2(x1)
        x3 = self.branch3(x2)
        if self.resize:
            # Strided unit: its own output becomes the new skip tensor.
            y = F.concat(x1, x2, x3, dim=1)
            y = self.last_conv(y)
            return y, y
        else:
            # Non-strided unit: fuse with the incoming skip tensor and pass it on.
            y = F.concat(x1, x2, x3, x0, dim=1)
            y = self.last_conv(y)
            return y, x0
class SelecSLS(HybridBlock):
    """
    SelecSLS model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    skip_channels : list of list of int
        Number of skipped channels for each unit.
    mid_channels : list of list of int
        Number of middle channels for each unit.
    kernels3 : list of list of int/bool
        Using 3x3 (instead of 1x1) kernel for each head unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 skip_channels,
                 mid_channels,
                 kernels3,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SelecSLS, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        init_block_channels = 32
        with self.name_scope():
            # Backbone stages pass (output, skip) pairs; the head stages and the
            # final pooling are ordinary single-path modules.
            self.features = DualPathSequential(
                return_two=False,
                first_ordinals=1,
                last_ordinals=(1 + len(kernels3)),
                prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                # k < 0: backbone stage of SelecSLS units; k >= 0: head stage of plain convs.
                k = i - len(skip_channels)
                stage = DualPathSequential(prefix="stage{}_".format(i + 1)) if k < 0 else\
                    nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Each stage downsamples once, via its first unit.
                        strides = 2 if j == 0 else 1
                        if k < 0:
                            unit = SelecSLSUnit(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                skip_channels=skip_channels[i][j],
                                mid_channels=mid_channels[i][j],
                                strides=strides,
                                bn_use_global_stats=bn_use_global_stats)
                        else:
                            conv_block_class = conv3x3_block if kernels3[k][j] == 1 else conv1x1_block
                            unit = conv_block_class(
                                in_channels=in_channels,
                                out_channels=out_channels,
                                strides=strides,
                                bn_use_global_stats=bn_use_global_stats)
                        stage.add(unit)
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.AvgPool2D(
                pool_size=4,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        """Extract features and classify."""
        x = self.features(x)
        x = self.output(x)
        return x
def get_selecsls(version,
                 model_name=None,
                 pretrained=False,
                 ctx=cpu(),
                 root=os.path.join("~", ".mxnet", "models"),
                 **kwargs):
    """
    Create SelecSLS model with specific parameters.

    Parameters:
    ----------
    version : str
        Version of SelecSLS.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-version architecture tables (backbone stages + classifier head widths).
    if version in ("42", "42b"):
        channels = [[64, 128], [144, 288], [304, 480]]
        skip_channels = [[0, 64], [0, 144], [0, 304]]
        mid_channels = [[64, 64], [144, 144], [304, 304]]
        kernels3 = [[1, 1], [1, 0]]
        head_channels = [[960, 1024], [1024, 1280]] if version == "42" else [[960, 1024], [1280, 1024]]
    elif version in ("60", "60b"):
        channels = [[64, 128], [128, 128, 288], [288, 288, 288, 416]]
        skip_channels = [[0, 64], [0, 128, 128], [0, 288, 288, 288]]
        mid_channels = [[64, 64], [128, 128, 128], [288, 288, 288, 288]]
        kernels3 = [[1, 1], [1, 0]]
        head_channels = [[756, 1024], [1024, 1280]] if version == "60" else [[756, 1024], [1280, 1024]]
    elif version == "84":
        channels = [[64, 144], [144, 144, 144, 144, 304], [304, 304, 304, 304, 304, 512]]
        skip_channels = [[0, 64], [0, 144, 144, 144, 144], [0, 304, 304, 304, 304, 304]]
        mid_channels = [[64, 64], [144, 144, 144, 144, 144], [304, 304, 304, 304, 304, 304]]
        kernels3 = [[1, 1], [1, 1]]
        head_channels = [[960, 1024], [1024, 1280]]
    else:
        raise ValueError("Unsupported SelecSLS version {}".format(version))
    net = SelecSLS(
        channels=(channels + head_channels),
        skip_channels=skip_channels,
        mid_channels=mid_channels,
        kernels3=kernels3,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(model_name=model_name, local_model_store_dir_path=root),
            ctx=ctx)
    return net
def selecsls42(**kwargs):
    """
    SelecSLS-42 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_selecsls(version="42", model_name="selecsls42", **kwargs)
def selecsls42b(**kwargs):
    """
    SelecSLS-42b model (wider final head than SelecSLS-42) from 'XNect: Real-time Multi-person 3D Human Pose
    Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_selecsls(version="42b", model_name="selecsls42b", **kwargs)
def selecsls60(**kwargs):
    """
    SelecSLS-60 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_selecsls(version="60", model_name="selecsls60", **kwargs)
def selecsls60b(**kwargs):
    """
    SelecSLS-60b model (wider final head than SelecSLS-60) from 'XNect: Real-time Multi-person 3D Human Pose
    Estimation with a Single RGB Camera,' https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_selecsls(version="60b", model_name="selecsls60b", **kwargs)
def selecsls84(**kwargs):
    """
    SelecSLS-84 model from 'XNect: Real-time Multi-person 3D Human Pose Estimation with a Single RGB Camera,'
    https://arxiv.org/abs/1907.00837.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_selecsls(version="84", model_name="selecsls84", **kwargs)
def _test():
    """Smoke-test every SelecSLS variant: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    # Regression table: constructor -> expected trainable-parameter count.
    expected_weight_counts = {
        selecsls42: 30354952,
        selecsls42b: 32458248,
        selecsls60: 30670768,
        selecsls60b: 32774064,
        selecsls84: 50954600,
    }
    for model, expected in expected_weight_counts.items():
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip shapeless and frozen parameters.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke tests when this module is executed directly.
if __name__ == "__main__":
    _test()
| 14,256 | 33.943627 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/inceptionv4.py | """
InceptionV4 for ImageNet-1K, implemented in Gluon.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionV4', 'inceptionv4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import ConvBlock, conv3x3_block
from .inceptionv3 import MaxPoolBranch, AvgPoolBranch, Conv1x1Branch, ConvSeqBranch
class Conv3x3Branch(HybridBlock):
    """
    InceptionV4 specific convolutional 3x3 branch block.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(Conv3x3Branch, self).__init__(**kwargs)
        with self.name_scope():
            # Strided 'valid' 3x3 convolution: halves the spatial resolution.
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=2,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv(x)
        return x
class ConvSeq3x3Branch(HybridBlock):
    """
    InceptionV4 specific convolutional sequence branch block with splitting by 3x3.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    mid_channels_list : list of tuple of int
        List of numbers of output channels for middle layers.
    kernel_size_list : list of tuple of int or tuple of tuple/list of 2 int
        List of convolution window sizes.
    strides_list : list of tuple of int or tuple of tuple/list of 2 int
        List of strides of the convolution.
    padding_list : list of tuple of int or tuple of tuple/list of 2 int
        List of padding values for convolution layers.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 mid_channels_list,
                 kernel_size_list,
                 strides_list,
                 padding_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ConvSeq3x3Branch, self).__init__(**kwargs)
        with self.name_scope():
            # Stem: a sequence of conv blocks defined by the parallel parameter lists.
            self.conv_list = nn.HybridSequential(prefix="")
            for i, (mid_channels, kernel_size, strides, padding) in enumerate(zip(
                    mid_channels_list, kernel_size_list, strides_list, padding_list)):
                self.conv_list.add(ConvBlock(
                    in_channels=in_channels,
                    out_channels=mid_channels,
                    kernel_size=kernel_size,
                    strides=strides,
                    padding=padding,
                    bn_epsilon=bn_epsilon,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = mid_channels
            # Tail: the stem output is split into parallel 1x3 and 3x1 convolutions,
            # so the branch finally emits 2 * out_channels channels.
            self.conv1x3 = ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(1, 3),
                strides=1,
                padding=(0, 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3x1 = ConvBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=(3, 1),
                strides=1,
                padding=(1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv_list(x)
        y1 = self.conv1x3(x)
        y2 = self.conv3x1(x)
        # Concatenate the two asymmetric-kernel outputs along the channel axis.
        x = F.concat(y1, y2, dim=1)
        return x
class InceptionAUnit(HybridBlock):
    """
    InceptionV4 type Inception-A unit.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        # Input width is fixed by the preceding stage of the InceptionV4 architecture.
        in_channels = 384
        with self.name_scope():
            # Four parallel branches, concatenated channel-wise (4 * 96 = 384 out).
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=96,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(64, 96),
                kernel_size_list=(1, 3),
                strides_list=(1, 1),
                padding_list=(0, 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(64, 96, 96),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 1),
                padding_list=(0, 1, 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(AvgPoolBranch(
                in_channels=in_channels,
                out_channels=96,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                count_include_pad=False))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class ReductionAUnit(HybridBlock):
    """
    InceptionV4 type Reduction-A unit.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        # Input width is fixed by the preceding Inception-A stage.
        in_channels = 384
        with self.name_scope():
            # Three parallel downsampling branches, concatenated channel-wise.
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(384,),
                kernel_size_list=(3,),
                strides_list=(2,),
                padding_list=(0,),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 224, 256),
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptionBUnit(HybridBlock):
    """
    InceptionV4 type Inception-B unit.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        # Input width is fixed by the preceding Reduction-A stage.
        in_channels = 1024
        with self.name_scope():
            # Four parallel branches with factorized 7x7 convolutions (1x7/7x1 pairs).
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=384,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 224, 256),
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 192, 224, 224, 256),
                kernel_size_list=(1, (7, 1), (1, 7), (7, 1), (1, 7)),
                strides_list=(1, 1, 1, 1, 1),
                padding_list=(0, (3, 0), (0, 3), (3, 0), (0, 3)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(AvgPoolBranch(
                in_channels=in_channels,
                out_channels=128,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                count_include_pad=False))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class ReductionBUnit(HybridBlock):
    """
    InceptionV4 type Reduction-B unit.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        # Input width is fixed by the preceding Inception-B stage.
        in_channels = 1024
        with self.name_scope():
            # Three parallel downsampling branches, concatenated channel-wise.
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(192, 192),
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=(256, 256, 320, 320),
                kernel_size_list=(1, (1, 7), (7, 1), 3),
                strides_list=(1, 1, 1, 2),
                padding_list=(0, (0, 3), (3, 0), 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptionCUnit(HybridBlock):
    """
    InceptionV4 type Inception-C unit.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        # Input width is fixed by the preceding Reduction-B stage.
        in_channels = 1536
        with self.name_scope():
            # Four parallel branches; the ConvSeq3x3Branch ones each emit 2 * 256 channels.
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=256,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeq3x3Branch(
                in_channels=in_channels,
                out_channels=256,
                mid_channels_list=(384,),
                kernel_size_list=(1,),
                strides_list=(1,),
                padding_list=(0,),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeq3x3Branch(
                in_channels=in_channels,
                out_channels=256,
                mid_channels_list=(384, 448, 512),
                kernel_size_list=(1, (3, 1), (1, 3)),
                strides_list=(1, 1, 1),
                padding_list=(0, (1, 0), (0, 1)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(AvgPoolBranch(
                in_channels=in_channels,
                out_channels=256,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                count_include_pad=False))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptBlock3a(HybridBlock):
    """
    InceptionV4 type Mixed-3a block.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptBlock3a, self).__init__(**kwargs)
        with self.name_scope():
            # Downsample via max-pool and strided conv in parallel, then concatenate.
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(MaxPoolBranch())
            self.branches.add(Conv3x3Branch(
                in_channels=64,
                out_channels=96,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptBlock4a(HybridBlock):
    """
    InceptionV4 type Mixed-4a block.
    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptBlock4a, self).__init__(**kwargs)
        with self.name_scope():
            # Two parallel conv sequences (one with factorized 7x7), concatenated channel-wise.
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=160,
                out_channels_list=(64, 96),
                kernel_size_list=(1, 3),
                strides_list=(1, 1),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=160,
                out_channels_list=(64, 64, 64, 96),
                kernel_size_list=(1, (1, 7), (7, 1), 3),
                strides_list=(1, 1, 1, 1),
                padding_list=(0, (0, 3), (3, 0), 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptBlock5a(HybridBlock):
    """
    InceptionV4 type Mixed-5a block.

    Two parallel branches concatenated along the channel axis: a 3x3
    convolution branch (192 -> 192 channels) and a max-pooling branch.

    Parameters:
    ----------
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptBlock5a, self).__init__(**kwargs)
        with self.name_scope():
            # Concatenate branch outputs along the channel axis (axis=1).
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv3x3Branch(
                in_channels=192,
                out_channels=192,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(MaxPoolBranch())

    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptInitBlock(HybridBlock):
    """
    InceptionV4 specific initial block.

    Stem of the network: three plain 3x3 convolutions followed by the three
    mixed stem blocks (Mixed-3a, Mixed-4a, Mixed-5a).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            # Conv stem: one strided 3x3 conv, then two unit-stride 3x3 convs
            # (in_channels -> 32 -> 32 -> 64).
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=32,
                strides=2,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=32,
                out_channels=32,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=32,
                out_channels=64,
                strides=1,
                padding=1,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            # Mixed stem blocks 3a, 4a and 5a applied in sequence.
            self.block1 = InceptBlock3a(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
            self.block2 = InceptBlock4a(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)
            self.block3 = InceptBlock5a(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        return x
class InceptionV4(HybridBlock):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 **kwargs):
        super(InceptionV4, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Units per stage; the first unit of stages 2 and 3 is a reduction
        # unit, the remaining units are 'normal' Inception units.
        layers = [4, 8, 4]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(InceptInitBlock(
                in_channels=in_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            for i, layers_per_stage in enumerate(layers):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j in range(layers_per_stage):
                        # Downsampling (reduction) unit only at the start of
                        # stages after the first one.
                        if (j == 0) and (i != 0):
                            unit = reduction_units[i - 1]
                        else:
                            unit = normal_units[i]
                        stage.add(unit(bn_epsilon=bn_epsilon, bn_use_global_stats=bn_use_global_stats))
                self.features.add(stage)
            # 8x8 average pooling; presumably matches the final feature-map
            # size for the default 299x299 input -- confirm for other sizes.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            if dropout_rate > 0.0:
                self.output.add(nn.Dropout(rate=dropout_rate))
            # Classifier expects the 1536-channel InceptionV4 feature vector.
            self.output.add(nn.Dense(
                units=classes,
                in_units=1536))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_inceptionv4(model_name=None,
                    pretrained=False,
                    ctx=cpu(),
                    root=os.path.join("~", ".mxnet", "models"),
                    **kwargs):
    """
    Create InceptionV4 model with specific parameters.

    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = InceptionV4(**kwargs)

    if pretrained:
        # A non-empty model name is required to locate the weight file.
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file_path = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weight_file_path, ctx=ctx)

    return net
def inceptionv4(**kwargs):
    """
    InceptionV4 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Fixed model configuration; the original InceptionV4 uses a larger
    # BatchNorm epsilon than the library default.
    model_config = dict(model_name="inceptionv4", bn_epsilon=1e-3)
    return get_inceptionv4(**model_config, **kwargs)
def _test():
    """
    Smoke-test the InceptionV4 model: trainable-weight count and output shape.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model in [inceptionv4]:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # Count every trainable parameter that has a concrete shape.
        net_params = net.collect_params()
        weight_count = sum(
            np.prod(param.shape) for param in net_params.values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != inceptionv4 or weight_count == 42679816)

        x = mx.nd.random.normal(shape=(1, 3, 299, 299), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))


if __name__ == "__main__":
    _test()
| 23,613 | 33.573939 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/regnet.py | """
RegNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.
"""
__all__ = ['RegNet', 'regnetx002', 'regnetx004', 'regnetx006', 'regnetx008', 'regnetx016', 'regnetx032', 'regnetx040',
'regnetx064', 'regnetx080', 'regnetx120', 'regnetx160', 'regnetx320', 'regnety002', 'regnety004',
'regnety006', 'regnety008', 'regnety016', 'regnety032', 'regnety040', 'regnety064', 'regnety080',
'regnety120', 'regnety160', 'regnety320', 'regnetz002', 'regnetw002']
import os
import numpy as np
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, SEBlock
class RegNetBottleneck(HybridBlock):
    """
    RegNet bottleneck block for residual path in RegNet unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    bottleneck_factor : int, default 1
        Bottleneck factor.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 groups,
                 use_se,
                 bottleneck_factor=1,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(RegNetBottleneck, self).__init__(**kwargs)
        self.use_se = use_se
        mid_channels = out_channels // bottleneck_factor
        # `groups` effectively acts as a group width: the grouped 3x3 conv gets
        # mid_channels // groups groups, i.e. `groups` channels per group.
        mid_groups = mid_channels // groups
        with self.name_scope():
            # Bottleneck: 1x1 -> grouped 3x3 (carries the stride) -> 1x1
            # (no activation on the last conv; ReLU is applied by the unit).
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                groups=mid_groups,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            if self.use_se:
                # Squeeze-and-excitation; reduction is relative to in_channels.
                self.se = SEBlock(
                    channels=mid_channels,
                    mid_channels=(in_channels // 4))
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        if self.use_se:
            x = self.se(x)
        x = self.conv3(x)
        return x
class RegNetUnit(HybridBlock):
    """
    RegNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    groups : int
        Number of groups.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 groups,
                 use_se,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(RegNetUnit, self).__init__(**kwargs)
        # The shortcut needs a projection whenever the tensor shape changes
        # (different channel count or spatial downsampling).
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = RegNetBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                groups=groups,
                use_se=use_se,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            if self.resize_identity:
                # 1x1 projection (no activation) matches channels and stride.
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    bn_cudnn_off=bn_cudnn_off,
                    activation=None)
            self.activ = nn.Activation("relu")

    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # Residual addition followed by ReLU.
        x = x + identity
        x = self.activ(x)
        return x
class RegNet(HybridBlock):
    """
    RegNet model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    groups : list of int
        Number of groups for each stage.
    use_se : bool
        Whether to use SE-module.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 groups,
                 use_se,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(RegNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: a single strided 3x3 convolution.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                padding=1,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = init_block_channels
            for i, (channels_per_stage, groups_per_stage) in enumerate(zip(channels, groups)):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Each stage downsamples once, at its first unit.
                        strides = 2 if (j == 0) else 1
                        stage.add(RegNetUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            groups=groups_per_stage,
                            use_se=use_se,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off))
                        in_channels = out_channels
                self.features.add(stage)
            self.features.add(nn.GlobalAvgPool2D())
            # Classifier head: flatten the pooled features into a dense layer.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_regnet(channels_init,
               channels_slope,
               channels_mult,
               depth,
               groups,
               use_se=False,
               model_name=None,
               pretrained=False,
               ctx=cpu(),
               root=os.path.join("~", ".mxnet", "models"),
               **kwargs):
    """
    Create RegNet model with specific parameters.

    Parameters:
    ----------
    channels_init : float
        Initial value for channels/widths.
    channels_slope : float
        Slope value for channels/widths.
    channels_mult : float
        Width multiplier value.
    depth : int
        Depth value.
    groups : int
        Number of groups.
    use_se : bool, default False
        Whether to use SE-module.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    divisor = 8
    assert (channels_slope >= 0) and (channels_init > 0) and (channels_mult > 1) and (channels_init % divisor == 0)

    # Generate continuous per-block channels/widths (u_j = w_0 + w_a * j):
    channels_cont = np.arange(depth) * channels_slope + channels_init
    # Quantize per-block channels/widths to a geometric series of channels_mult:
    channels_exps = np.round(np.log(channels_cont / channels_init) / np.log(channels_mult))
    channels = channels_init * np.power(channels_mult, channels_exps)
    # Round widths to multiples of `divisor`. Note: `.astype(int)` instead of
    # the `np.int` alias, which was removed in NumPy 1.24.
    channels = (np.round(channels / divisor) * divisor).astype(int)
    # Generate per stage channels/widths and layers/depths:
    channels_per_stage, layers = np.unique(channels, return_counts=True)
    # Adjusts the compatibility of channels/widths and groups:
    groups_per_stage = [min(groups, c) for c in channels_per_stage]
    channels_per_stage = [int(round(c / g) * g) for c, g in zip(channels_per_stage, groups_per_stage)]
    channels = [[ci] * li for (ci, li) in zip(channels_per_stage, layers)]

    init_block_channels = 32

    net = RegNet(
        channels=channels,
        init_block_channels=init_block_channels,
        groups=groups_per_stage,
        use_se=use_se,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def _regnet_builder(model_name, desc, channels_init, channels_slope, channels_mult, depth, groups, use_se=False):
    """
    Make a named RegNet builder function for a fixed generator configuration.

    Parameters:
    ----------
    model_name : str
        Model name for loading pretrained model; also used as the function name.
    desc : str
        Human-readable model description used in the generated docstring.
    channels_init : float
        Initial value for channels/widths.
    channels_slope : float
        Slope value for channels/widths.
    channels_mult : float
        Width multiplier value.
    depth : int
        Depth value.
    groups : int
        Number of groups.
    use_se : bool, default False
        Whether to use SE-module.

    Returns:
    -------
    function
        A builder accepting the same keyword arguments as `get_regnet`
        (`pretrained`, `ctx`, `root`, ...).
    """
    def builder(**kwargs):
        return get_regnet(
            channels_init=channels_init,
            channels_slope=channels_slope,
            channels_mult=channels_mult,
            depth=depth,
            groups=groups,
            use_se=use_se,
            model_name=model_name,
            **kwargs)
    builder.__name__ = model_name
    builder.__doc__ = """
    {desc} model from 'Designing Network Design Spaces,' https://arxiv.org/abs/2003.13678.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """.format(desc=desc)
    return builder


# RegNetX models (no SE-module).
# Columns: name, description, w_0 (init), w_a (slope), w_m (mult), depth, groups.
regnetx002 = _regnet_builder("regnetx002", "RegNetX-200MF", 24, 36.44, 2.49, 13, 8)
regnetx004 = _regnet_builder("regnetx004", "RegNetX-400MF", 24, 24.48, 2.54, 22, 16)
regnetx006 = _regnet_builder("regnetx006", "RegNetX-600MF", 48, 36.97, 2.24, 16, 24)
regnetx008 = _regnet_builder("regnetx008", "RegNetX-800MF", 56, 35.73, 2.28, 16, 16)
regnetx016 = _regnet_builder("regnetx016", "RegNetX-1.6GF", 80, 34.01, 2.25, 18, 24)
regnetx032 = _regnet_builder("regnetx032", "RegNetX-3.2GF", 88, 26.31, 2.25, 25, 48)
regnetx040 = _regnet_builder("regnetx040", "RegNetX-4.0GF", 96, 38.65, 2.43, 23, 40)
regnetx064 = _regnet_builder("regnetx064", "RegNetX-6.4GF", 184, 60.83, 2.07, 17, 56)
regnetx080 = _regnet_builder("regnetx080", "RegNetX-8.0GF", 80, 49.56, 2.88, 23, 120)
regnetx120 = _regnet_builder("regnetx120", "RegNetX-12GF", 168, 73.36, 2.37, 19, 112)
regnetx160 = _regnet_builder("regnetx160", "RegNetX-16GF", 216, 55.59, 2.1, 22, 128)
regnetx320 = _regnet_builder("regnetx320", "RegNetX-32GF", 320, 69.86, 2.0, 23, 168)

# RegNetY models (with SE-module).
regnety002 = _regnet_builder("regnety002", "RegNetY-200MF", 24, 36.44, 2.49, 13, 8, use_se=True)
regnety004 = _regnet_builder("regnety004", "RegNetY-400MF", 48, 27.89, 2.09, 16, 8, use_se=True)
regnety006 = _regnet_builder("regnety006", "RegNetY-600MF", 48, 32.54, 2.32, 15, 16, use_se=True)
regnety008 = _regnet_builder("regnety008", "RegNetY-800MF", 56, 38.84, 2.4, 14, 16, use_se=True)
regnety016 = _regnet_builder("regnety016", "RegNetY-1.6GF", 48, 20.71, 2.65, 27, 24, use_se=True)
regnety032 = _regnet_builder("regnety032", "RegNetY-3.2GF", 80, 42.63, 2.66, 21, 24, use_se=True)
regnety040 = _regnet_builder("regnety040", "RegNetY-4.0GF", 96, 31.41, 2.24, 22, 64, use_se=True)
regnety064 = _regnet_builder("regnety064", "RegNetY-6.4GF", 112, 33.22, 2.27, 25, 72, use_se=True)
regnety080 = _regnet_builder("regnety080", "RegNetY-8.0GF", 192, 76.82, 2.19, 17, 56, use_se=True)
regnety120 = _regnet_builder("regnety120", "RegNetY-12GF", 168, 73.36, 2.37, 19, 112, use_se=True)
regnety160 = _regnet_builder("regnety160", "RegNetY-16GF", 200, 106.23, 2.48, 18, 112, use_se=True)
regnety320 = _regnet_builder("regnety320", "RegNetY-32GF", 232, 115.89, 2.53, 20, 232, use_se=True)

# Experimental variants: degenerate group settings of RegNetX-200MF.
regnetz002 = _regnet_builder("regnetz002", "RegNetZ-200MF experimental", 24, 36.44, 2.49, 13, 1)
regnetw002 = _regnet_builder("regnetw002", "RegNetW-200MF experimental", 24, 36.44, 2.49, 13, 1024)
def _test():
    """
    Smoke-test all RegNet variants: check the trainable-weight count of every
    model against a table of expected values, then verify the output shape.
    """
    import numpy as np
    import mxnet as mx

    pretrained = False

    # Expected number of trainable weights for every model variant
    # (replaces the former 26-way duplicated assert chain).
    expected_weight_counts = {
        regnetx002: 2684792,
        regnetx004: 5157512,
        regnetx006: 6196040,
        regnetx008: 7259656,
        regnetx016: 9190136,
        regnetx032: 15296552,
        regnetx040: 22118248,
        regnetx064: 26209256,
        regnetx080: 39572648,
        regnetx120: 46106056,
        regnetx160: 54278536,
        regnetx320: 107811560,
        regnety002: 3162996,
        regnety004: 4344144,
        regnety006: 6055160,
        regnety008: 6263168,
        regnety016: 11202430,
        regnety032: 19436338,
        regnety040: 20646656,
        regnety064: 30583252,
        regnety080: 39180068,
        regnety120: 51822544,
        regnety160: 83590140,
        regnety320: 145046770,
        regnetz002: 2479160,
        regnetw002: 11846648,
    }

    for model, expected_weight_count in expected_weight_counts.items():
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        # Count every trainable parameter that has a concrete shape.
        net_params = net.collect_params()
        weight_count = 0
        for param in net_params.values():
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_weight_count)

        batch = 14
        size = 224
        x = mx.nd.zeros((batch, 3, size, size), ctx=ctx)
        y = net(x)
        assert (y.shape == (batch, 1000))


if __name__ == "__main__":
    _test()
| 30,188 | 34.896552 | 118 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/icnet.py | """
ICNet for image segmentation, implemented in Gluon.
Original paper: 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
https://arxiv.org/abs/1704.08545.
"""
__all__ = ['ICNet', 'icnet_resnetd50b_cityscapes']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1, conv1x1_block, conv3x3_block, InterpolationBlock, MultiOutputSequential
from .pspnet import PyramidPooling
from .resnetd import resnetd50b
class ICInitBlock(HybridBlock):
    """
    ICNet specific initial block.

    Three consecutive strided 3x3 convolutions (overall spatial reduction of
    1/8), with the channel path in_channels -> out_channels/2 ->
    out_channels/2 -> out_channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ICInitBlock, self).__init__(**kwargs)
        mid_channels = out_channels // 2
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv3 = conv3x3_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class PSPBlock(HybridBlock):
    """
    ICNet specific PSPNet reduced head block: pyramid pooling followed by a
    bottleneck 3x3 convolution and dropout.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    upscale_out_size : tuple of 2 int
        Spatial size of the input tensor for the bilinear upsampling operation.
    bottleneck_factor : int
        Bottleneck factor.
    """
    def __init__(self,
                 in_channels,
                 upscale_out_size,
                 bottleneck_factor,
                 **kwargs):
        super(PSPBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor
        with self.name_scope():
            self.pool = PyramidPooling(
                in_channels=in_channels,
                upscale_out_size=upscale_out_size)
            # NOTE(review): conv input width is hard-coded to 4096, presumably
            # the PyramidPooling output width (2 * 2048) for the ResNet(D)-50b
            # backbone -- confirm before reusing this block with another backbone.
            self.conv = conv3x3_block(
                in_channels=4096,
                out_channels=mid_channels)
            self.dropout = nn.Dropout(rate=0.1)
    def hybrid_forward(self, F, x):
        x = self.pool(x)
        x = self.conv(x)
        x = self.dropout(x)
        return x
class CFFBlock(HybridBlock):
    """
    Cascade Feature Fusion block. Upsamples the low-resolution branch, fuses it
    with the high-resolution branch, and also emits an auxiliary class map
    computed from the low-resolution branch.

    Parameters:
    ----------
    in_channels_low : int
        Number of input channels (low-resolution input).
    in_channels_high : int
        Number of input channels (high-resolution input).
    out_channels : int
        Number of output channels.
    out_size : tuple of two ints
        Spatial size of the expected output image.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels_low,
                 in_channels_high,
                 out_channels,
                 out_size,
                 classes,
                 **kwargs):
        super(CFFBlock, self).__init__(**kwargs)
        with self.name_scope():
            # 2x upsampling of the low-resolution input.
            self.up = InterpolationBlock(
                scale_factor=2,
                out_size=out_size)
            # Dilated 3x3 convolution enlarges the receptive field of the low branch.
            self.conv_low = conv3x3_block(
                in_channels=in_channels_low,
                out_channels=out_channels,
                padding=2,
                dilation=2,
                activation=None)
            # "conv_hign" (sic): the misspelled attribute name is kept as-is,
            # since renaming it would change the stored parameter names.
            self.conv_hign = conv1x1_block(
                in_channels=in_channels_high,
                out_channels=out_channels,
                activation=None)
            self.activ = nn.Activation("relu")
            self.conv_cls = conv1x1(
                in_channels=out_channels,
                out_channels=classes)
    def hybrid_forward(self, F, xl, xh):
        xl = self.up(xl)
        xl = self.conv_low(xl)
        xh = self.conv_hign(xh)
        x = xl + xh
        x = self.activ(x)
        # Auxiliary classifier is computed on the low-resolution branch only.
        x_cls = self.conv_cls(xl)
        return x, x_cls
class ICHeadBlock(HybridBlock):
    """
    ICNet head block. Fuses the three branch outputs by cascade feature fusion
    and returns class maps at four scales (highest resolution first).

    Parameters:
    ----------
    in_size : tuple of two ints
        Spatial size of the expected output image.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_size,
                 classes,
                 **kwargs):
        super(ICHeadBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.cff_12 = CFFBlock(
                in_channels_low=128,
                in_channels_high=64,
                out_channels=128,
                classes=classes,
                out_size=(in_size[0] // 8, in_size[1] // 8) if in_size is not None else None)
            self.cff_24 = CFFBlock(
                in_channels_low=256,
                in_channels_high=256,
                out_channels=128,
                classes=classes,
                out_size=(in_size[0] // 16, in_size[1] // 16) if in_size is not None else None)
            self.up_x2 = InterpolationBlock(
                scale_factor=2,
                out_size=(in_size[0] // 4, in_size[1] // 4) if in_size is not None else None)
            # NOTE(review): despite the name, up_x8 uses scale_factor=4
            # (1/4 resolution -> full resolution) -- confirm intent.
            self.up_x8 = InterpolationBlock(
                scale_factor=4,
                out_size=in_size)
            self.conv_cls = conv1x1(
                in_channels=128,
                out_channels=classes)
    def hybrid_forward(self, F, x1, x2, x4):
        outputs = []
        # Cascade fusion: lowest resolution (x4) is fused into x2, then into x1.
        x_cff_24, x_24_cls = self.cff_24(x4, x2)
        outputs.append(x_24_cls)
        x_cff_12, x_12_cls = self.cff_12(x_cff_24, x1)
        outputs.append(x_12_cls)
        up_x2 = self.up_x2(x_cff_12)
        up_x2 = self.conv_cls(up_x2)
        outputs.append(up_x2)
        up_x8 = self.up_x8(up_x2)
        outputs.append(up_x8)
        # 1 -> 1/4 -> 1/8 -> 1/16
        outputs.reverse()
        return tuple(outputs)
class ICNet(HybridBlock):
    """
    ICNet model from 'ICNet for Real-Time Semantic Segmentation on High-Resolution Images,'
    https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels form each feature extractor.
    channels : tuple of int
        Number of output channels for each branch.
    aux : bool, default False
        Whether to output an auxiliary result.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (480, 480)
        Spatial size of the expected input image.
    classes : int, default 21
        Number of segmentation classes.
    """
    def __init__(self,
                 backbones,
                 backbones_out_channels,
                 channels,
                 aux=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(480, 480),
                 classes=21,
                 **kwargs):
        super(ICNet, self).__init__(**kwargs)
        assert (in_channels > 0)
        # The head produces maps at 1/8 resolution, so both sides must divide by 8.
        assert ((in_size[0] % 8 == 0) and (in_size[1] % 8 == 0))
        self.in_size = in_size
        self.classes = classes
        self.aux = aux
        self.fixed_size = fixed_size
        psp_pool_out_size = (self.in_size[0] // 32, self.in_size[1] // 32) if fixed_size else None
        psp_head_out_channels = 512
        with self.name_scope():
            # Branch 1 processes the full-resolution image.
            self.branch1 = ICInitBlock(
                in_channels=in_channels,
                out_channels=channels[0])
            # Branch 2 processes downscaled inputs through the backbone halves
            # and the reduced PSP head; up=False means downscaling interpolation.
            self.branch2 = MultiOutputSequential(prefix="")
            self.branch2.add(InterpolationBlock(
                scale_factor=2,
                out_size=(in_size[0] // 2, in_size[1] // 2) if fixed_size else None,
                up=False))
            # The first backbone half also emits its raw output (do_output=True);
            # that intermediate feature is consumed by conv_y2 below.
            backbones[0].do_output = True
            self.branch2.add(backbones[0])
            self.branch2.add(InterpolationBlock(
                scale_factor=2,
                out_size=(in_size[0] // 32, in_size[1] // 32) if fixed_size else None,
                up=False))
            self.branch2.add(backbones[1])
            self.branch2.add(PSPBlock(
                in_channels=backbones_out_channels[1],
                upscale_out_size=psp_pool_out_size,
                bottleneck_factor=4))
            self.branch2.add(conv1x1_block(
                in_channels=psp_head_out_channels,
                out_channels=channels[2]))
            self.conv_y2 = conv1x1_block(
                in_channels=backbones_out_channels[0],
                out_channels=channels[1])
            self.final_block = ICHeadBlock(
                in_size=in_size if fixed_size else None,
                classes=classes)
    def hybrid_forward(self, F, x):
        y1 = self.branch1(x)
        # branch2 returns (final output, intermediate output of backbones[0]).
        y3, y2 = self.branch2(x)
        y2 = self.conv_y2(y2)
        x = self.final_block(y1, y2, y3)
        if self.aux:
            return x
        else:
            return x[0]
def get_icnet(backbones,
              backbones_out_channels,
              classes,
              aux=False,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create ICNet model with specific parameters.

    Parameters:
    ----------
    backbones : tuple of nn.Sequential
        Feature extractors.
    backbones_out_channels : tuple of int
        Number of output channels form each feature extractor.
    classes : int
        Number of segmentation classes.
    aux : bool, default False
        Whether to output an auxiliary result.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Per-branch output widths used by the ICNet head.
    channels = (64, 256, 256)
    # Each backbone half feeds a single output into the network.
    for backbone in backbones:
        backbone.multi_output = False
    net = ICNet(
        backbones=backbones,
        backbones_out_channels=backbones_out_channels,
        channels=channels,
        classes=classes,
        aux=aux,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        # ignore_extra: pretrained checkpoints may carry auxiliary parameters.
        net.load_parameters(filename=weight_file, ctx=ctx, ignore_extra=True)
    return net
def icnet_resnetd50b_cityscapes(pretrained_backbone=False, classes=19, aux=True, **kwargs):
    """
    ICNet model on the base of ResNet(D)-50b for Cityscapes from 'ICNet for Real-Time Semantic Segmentation on
    High-Resolution Images,' https://arxiv.org/abs/1704.08545.

    Parameters:
    ----------
    pretrained_backbone : bool, default False
        Whether to load the pretrained weights for feature extractor.
    classes : int, default 19
        Number of segmentation classes.
    aux : bool, default True
        Whether to output an auxiliary result.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Drop the final stage of the backbone and split the rest into two halves.
    features = resnetd50b(pretrained=pretrained_backbone, ordinary_init=False, bends=None).features[:-1]
    return get_icnet(
        backbones=(features[:3], features[3:]),
        backbones_out_channels=(512, 2048),
        classes=classes,
        aux=aux,
        model_name="icnet_resnetd50b_cityscapes",
        **kwargs)
def _test():
    """Smoke-test the ICNet model: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    in_size = (1024, 2048)
    aux = False
    pretrained = False
    fixed_size = False
    models = [
        (icnet_resnetd50b_cityscapes, 19),
    ]
    for model, classes in models:
        net = model(pretrained=pretrained, in_size=in_size, fixed_size=fixed_size, aux=aux)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Count trainable parameters only.
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != icnet_resnetd50b_cityscapes or weight_count == 47489184)
        x = mx.nd.zeros((1, 3, in_size[0], in_size[1]), ctx=ctx)
        ys = net(x)
        y = ys[0] if aux else ys
        # Output must match the input batch/spatial size with `classes` channels.
        assert ((y.shape[0] == x.shape[0]) and (y.shape[1] == classes) and (y.shape[2] == x.shape[2]) and
                (y.shape[3] == x.shape[3]))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 14,177 | 31.668203 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/mobilenetb.py | """
MobileNet(B) with simplified depthwise separable convolution block for ImageNet-1K, implemented in Gluon.
Original paper: 'MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications,'
https://arxiv.org/abs/1704.04861.
"""
__all__ = ['mobilenetb_w1', 'mobilenetb_w3d4', 'mobilenetb_wd2', 'mobilenetb_wd4']
from .mobilenet import get_mobilenet
def mobilenetb_w1(**kwargs):
    """
    1.0 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Full-width (scale 1.0) variant with the simplified DWS block.
    return get_mobilenet(
        width_scale=1.0,
        dws_simplified=True,
        model_name="mobilenetb_w1",
        **kwargs)
def mobilenetb_w3d4(**kwargs):
    """
    0.75 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 3/4-width variant with the simplified DWS block.
    return get_mobilenet(
        width_scale=0.75,
        dws_simplified=True,
        model_name="mobilenetb_w3d4",
        **kwargs)
def mobilenetb_wd2(**kwargs):
    """
    0.5 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Half-width variant with the simplified DWS block.
    return get_mobilenet(
        width_scale=0.5,
        dws_simplified=True,
        model_name="mobilenetb_wd2",
        **kwargs)
def mobilenetb_wd4(**kwargs):
    """
    0.25 MobileNet(B)-224 model with simplified depthwise separable convolution block from 'MobileNets: Efficient
    Convolutional Neural Networks for Mobile Vision Applications,' https://arxiv.org/abs/1704.04861.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Quarter-width variant with the simplified DWS block.
    return get_mobilenet(
        width_scale=0.25,
        dws_simplified=True,
        model_name="mobilenetb_wd4",
        **kwargs)
def _test():
    """Smoke-test all MobileNet(B) variants: parameter counts and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        mobilenetb_w1,
        mobilenetb_w3d4,
        mobilenetb_wd2,
        mobilenetb_wd4,
    ]
    # Expected trainable-parameter counts per variant.
    expected_counts = {
        mobilenetb_w1: 4222056,
        mobilenetb_w3d4: 2578120,
        mobilenetb_wd2: 1326632,
        mobilenetb_wd4: 467592,
    }
    for model in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, 1000))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 4,189 | 33.916667 | 113 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/shakedropresnet_cifar.py | """
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""
__all__ = ['CIFARShakeDropResNet', 'shakedropresnet20_cifar10', 'shakedropresnet20_cifar100', 'shakedropresnet20_svhn']
import os
import numpy as np
import mxnet as mx
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block
from .resnet import ResBlock, ResBottleneck
class ShakeDrop(mx.autograd.Function):
    """
    ShakeDrop function (imperative custom autograd op).

    Parameters:
    ----------
    p : float
        ShakeDrop specific probability (of life) for Bernoulli random variable.
    """
    def __init__(self, p):
        super(ShakeDrop, self).__init__()
        self.p = p
    def forward(self, x):
        if mx.autograd.is_training():
            # Bernoulli gate: b == 1 keeps the branch untouched, b == 0 replaces
            # it with per-sample random scaling alpha ~ U(-1, 1).
            b = np.random.binomial(n=1, p=self.p)
            # One alpha per sample (shape (N, 1, 1, 1)), broadcast over C/H/W.
            alpha = mx.nd.random.uniform_like(x.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=-1.0, high=1.0)
            y = mx.nd.broadcast_mul(b + alpha - b * alpha, x)
            # Only the (Python int) gate is needed by the backward pass.
            self.save_for_backward(b)
        else:
            # Inference: use the expected scaling E[b + alpha - b*alpha] = p
            # (alpha is symmetric around 0).
            y = self.p * x
        return y
    def backward(self, dy):
        b, = self.saved_tensors
        # Independent beta ~ U(0, 1) "shakes" the gradient when the gate is off.
        beta = mx.nd.random.uniform_like(dy.slice(begin=(None, 0, 0, 0), end=(None, 1, 1, 1)), low=0.0, high=1.0)
        return mx.nd.broadcast_mul(b + beta - b * beta, dy)
class ShakeDropResUnit(HybridBlock):
    """
    ShakeDrop-ResNet unit with residual connection.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_prob : float
        Residual branch life probability.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 bottleneck,
                 life_prob,
                 **kwargs):
        super(ShakeDropResUnit, self).__init__(**kwargs)
        self.life_prob = life_prob
        # A 1x1 projection shortcut is needed when the shape changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        body_class = ResBottleneck if bottleneck else ResBlock
        with self.name_scope():
            self.body = body_class(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats)
            if self.resize_identity:
                self.identity_conv = conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats,
                    activation=None)
            self.activ = nn.Activation("relu")
            # self.shake_drop = ShakeDrop(self.life_prob)
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        # A fresh ShakeDrop Function instance is created per call (see the
        # commented-out member alternative above), presumably because Function
        # objects hold per-call saved state -- note this precludes hybridization.
        x = ShakeDrop(self.life_prob)(x) + identity
        # x = self.shake_drop(x) + identity
        x = self.activ(x)
        return x
class CIFARShakeDropResNet(HybridBlock):
    """
    ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    life_probs : list of float
        Residual branch life probability for each unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 life_probs,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARShakeDropResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            # k is the flat unit index across all stages, used to pick life_probs.
            k = 0
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every stage except stage 1.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ShakeDropResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            life_prob=life_probs[k]))
                        in_channels = out_channels
                        k += 1
                self.features.add(stage)
            # Global 8x8 average pooling for 32x32 inputs (8 = 32 / 2 / 2).
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_shakedropresnet_cifar(classes,
                              blocks,
                              bottleneck,
                              model_name=None,
                              pretrained=False,
                              ctx=cpu(),
                              root=os.path.join("~", ".mxnet", "models"),
                              **kwargs):
    """
    Create ShakeDrop-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])
    # Three equally sized stages; depth-to-units divisor depends on the unit type.
    units_divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % units_divisor == 0)
    layers = [(blocks - 2) // units_divisor] * 3
    init_block_channels = 16
    channels_per_layers = [16, 32, 64]
    channels = [[width] * depth for (width, depth) in zip(channels_per_layers, layers)]
    if bottleneck:
        # Bottleneck units expand the output width by 4.
        channels = [[4 * width for width in stage] for stage in channels]
    # Life probabilities decay linearly with depth down to 1 - final_death_prob.
    total_layers = sum(layers)
    final_death_prob = 0.5
    life_probs = [1.0 - float(k) / float(total_layers) * final_death_prob
                  for k in range(1, total_layers + 1)]
    net = CIFARShakeDropResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        life_probs=life_probs,
        classes=classes,
        **kwargs)
    if pretrained:
        if not model_name:
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        weight_file = get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root)
        net.load_parameters(filename=weight_file, ctx=ctx)
    return net
def shakedropresnet20_cifar10(classes=10, **kwargs):
    """
    ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 20-layer variant without bottleneck units.
    return get_shakedropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="shakedropresnet20_cifar10",
        **kwargs)
def shakedropresnet20_cifar100(classes=100, **kwargs):
    """
    ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 20-layer variant without bottleneck units.
    return get_shakedropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="shakedropresnet20_cifar100",
        **kwargs)
def shakedropresnet20_svhn(classes=10, **kwargs):
    """
    ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
    https://arxiv.org/abs/1802.02375.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # 20-layer variant without bottleneck units.
    return get_shakedropresnet_cifar(
        classes=classes,
        blocks=20,
        bottleneck=False,
        model_name="shakedropresnet20_svhn",
        **kwargs)
def _test():
    """Smoke-test ShakeDrop-ResNet-20 variants, including a backward pass."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        (shakedropresnet20_cifar10, 10),
        (shakedropresnet20_cifar100, 100),
        (shakedropresnet20_svhn, 10),
    ]
    # Expected trainable-parameter counts per variant.
    expected_counts = {
        shakedropresnet20_cifar10: 272474,
        shakedropresnet20_cifar100: 278324,
        shakedropresnet20_svhn: 272474,
    }
    for model, classes in models:
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # Hybridization is deliberately not applied (ShakeDrop is imperative).
        weight_count = sum(
            np.prod(param.shape) for param in net.collect_params().values()
            if (param.shape is not None) and param._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_counts[model])
        x = mx.nd.zeros((14, 3, 32, 32), ctx=ctx)
        # Run forward under record() and backprop to exercise ShakeDrop.backward.
        with mx.autograd.record():
            y = net(x)
            y.backward()
        assert (y.shape == (14, classes))
# Run the smoke test when this module is executed as a script.
if __name__ == "__main__":
    _test()
| 12,306 | 33.570225 | 119 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/inceptionresnetv1.py | """
InceptionResNetV1 for ImageNet-1K, implemented in Gluon.
Original paper: 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
"""
__all__ = ['InceptionResNetV1', 'inceptionresnetv1', 'InceptionAUnit', 'InceptionBUnit', 'InceptionCUnit',
'ReductionAUnit', 'ReductionBUnit']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import HybridConcurrent
from .common import conv1x1, conv1x1_block, conv3x3_block, BatchNormExtra
from .inceptionv3 import MaxPoolBranch, Conv1x1Branch, ConvSeqBranch
class InceptionAUnit(HybridBlock):
    """
    InceptionResNetV1 type Inception-A unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionAUnit, self).__init__(**kwargs)
        # Residual branch scaling factor.
        self.scale = 0.17
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=out_channels_list[0],
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[1:3],
                kernel_size_list=(1, 3),
                strides_list=(1, 1),
                padding_list=(0, 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[3:6],
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 1),
                padding_list=(0, 1, 1),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # 1x1 conv projects the concatenated branches back to in_channels
            # so the residual sum is shape-compatible.
            conv_in_channels = out_channels_list[0] + out_channels_list[2] + out_channels_list[5]
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionBUnit(HybridBlock):
    """
    InceptionResNetV1 type Inception-B unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptionBUnit, self).__init__(**kwargs)
        # Residual branch scaling factor.
        self.scale = 0.10
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=out_channels_list[0],
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Factorized 7x7 convolution: 1x7 followed by 7x1.
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[1:4],
                kernel_size_list=(1, (1, 7), (7, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 3), (3, 0)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # 1x1 conv projects the concatenated branches back to in_channels.
            conv_in_channels = out_channels_list[0] + out_channels_list[3]
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        x = self.activ(x)
        return x
class InceptionCUnit(HybridBlock):
    """
    InceptionResNetV1 type Inception-C unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    scale : float, default 0.2
        Scale value for residual branch.
    activate : bool, default True
        Whether activate the convolution block.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats=False,
                 scale=0.2,
                 activate=True,
                 **kwargs):
        super(InceptionCUnit, self).__init__(**kwargs)
        self.activate = activate
        self.scale = scale
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(Conv1x1Branch(
                in_channels=in_channels,
                out_channels=out_channels_list[0],
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Factorized 3x3 convolution: 1x3 followed by 3x1.
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[1:4],
                kernel_size_list=(1, (1, 3), (3, 1)),
                strides_list=(1, 1, 1),
                padding_list=(0, (0, 1), (1, 0)),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # 1x1 conv projects the concatenated branches back to in_channels.
            conv_in_channels = out_channels_list[0] + out_channels_list[3]
            self.conv = conv1x1(
                in_channels=conv_in_channels,
                out_channels=in_channels,
                use_bias=True)
            if self.activate:
                self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        identity = x
        x = self.branches(x)
        x = self.conv(x)
        x = self.scale * x + identity
        # The final unit of the network may skip the activation.
        if self.activate:
            x = self.activ(x)
        return x
class ReductionAUnit(HybridBlock):
    """
    InceptionResNetV1 type Reduction-A unit. Halves the spatial resolution by
    concatenating two stride-2 convolutional branches with a max-pool branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionAUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[0:1],
                kernel_size_list=(3,),
                strides_list=(2,),
                padding_list=(0,),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[1:4],
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Parameter-free shortcut that preserves the input channels.
            self.branches.add(MaxPoolBranch())
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class ReductionBUnit(HybridBlock):
    """
    InceptionResNetV1 type Reduction-B unit. Halves the spatial resolution by
    concatenating three stride-2 convolutional branches with a max-pool branch.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        List for numbers of output channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(ReductionBUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.branches = HybridConcurrent(axis=1, prefix="")
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[0:2],
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[2:4],
                kernel_size_list=(1, 3),
                strides_list=(1, 2),
                padding_list=(0, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            self.branches.add(ConvSeqBranch(
                in_channels=in_channels,
                out_channels_list=out_channels_list[4:7],
                kernel_size_list=(1, 3, 3),
                strides_list=(1, 1, 2),
                padding_list=(0, 1, 0),
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            # Parameter-free shortcut that preserves the input channels.
            self.branches.add(MaxPoolBranch())
    def hybrid_forward(self, F, x):
        x = self.branches(x)
        return x
class InceptInitBlock(HybridBlock):
    """
    InceptionResNetV1 specific initial block (stem). Three stride-2 stages
    (conv1, pool, conv6) give an overall 8x spatial downscaling; the output
    has 256 channels.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 **kwargs):
        super(InceptInitBlock, self).__init__(**kwargs)
        with self.name_scope():
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=32,
                strides=2,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = conv3x3_block(
                in_channels=32,
                out_channels=32,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = conv3x3_block(
                in_channels=32,
                out_channels=64,
                strides=1,
                padding=1,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.pool = nn.MaxPool2D(
                pool_size=3,
                strides=2,
                padding=0)
            self.conv4 = conv1x1_block(
                in_channels=64,
                out_channels=80,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv5 = conv3x3_block(
                in_channels=80,
                out_channels=192,
                strides=1,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
            self.conv6 = conv3x3_block(
                in_channels=192,
                out_channels=256,
                strides=2,
                padding=0,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats)
    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.pool(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.conv6(x)
        return x
class InceptHead(HybridBlock):
    """
    InceptionResNetV1 specific classification block: flatten, optional dropout,
    a bias-free 512-unit embedding layer followed by batch normalization, and a
    final fully-connected classifier.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_epsilon : float
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    dropout_rate : float
        Fraction of the input units to drop. Must be a number between 0 and 1.
    classes : int
        Number of classification classes.
    """
    def __init__(self,
                 in_channels,
                 bn_epsilon,
                 bn_use_global_stats,
                 dropout_rate,
                 classes,
                 **kwargs):
        super(InceptHead, self).__init__(**kwargs)
        # Dropout layer is only instantiated when a non-zero rate is requested.
        self.use_dropout = (dropout_rate != 0.0)
        with self.name_scope():
            self.flatten = nn.Flatten()
            if self.use_dropout:
                self.dropout = nn.Dropout(rate=dropout_rate)
            # 512-d embedding without bias; normalization is done by `self.bn`.
            self.fc1 = nn.Dense(
                units=512,
                use_bias=False,
                in_units=in_channels)
            # BatchNormExtra is a project-local BatchNorm wrapper -- presumably a
            # 1D-capable variant; confirm against common.py.
            self.bn = BatchNormExtra(
                in_channels=512,
                epsilon=bn_epsilon,
                use_global_stats=bn_use_global_stats)
            self.fc2 = nn.Dense(
                units=classes,
                in_units=512)
    def hybrid_forward(self, F, x):
        x = self.flatten(x)
        if self.use_dropout:
            x = self.dropout(x)
        x = self.fc1(x)
        x = self.bn(x)
        x = self.fc2(x)
        return x
class InceptionResNetV1(HybridBlock):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.
    Parameters:
    ----------
    dropout_rate : float, default 0.0
        Fraction of the input units to drop. Must be a number between 0 and 1.
    bn_epsilon : float, default 1e-5
        Small float added to variance in Batch norm.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (299, 299)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 dropout_rate=0.0,
                 bn_epsilon=1e-5,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(299, 299),
                 classes=1000,
                 **kwargs):
        super(InceptionResNetV1, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        # Units per stage; the first unit of stages 2 and 3 is a reduction unit.
        layers = [5, 11, 7]
        # Input channel count at the start of each stage.
        in_channels_list = [256, 896, 1792]
        normal_out_channels_list = [[32, 32, 32, 32, 32, 32], [128, 128, 128, 128], [192, 192, 192, 192]]
        reduction_out_channels_list = [[384, 192, 192, 256], [256, 384, 256, 256, 256, 256, 256]]
        normal_units = [InceptionAUnit, InceptionBUnit, InceptionCUnit]
        reduction_units = [ReductionAUnit, ReductionBUnit]
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(InceptInitBlock(
                in_channels=in_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = in_channels_list[0]
            for i, layers_per_stage in enumerate(layers):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j in range(layers_per_stage):
                        if (j == 0) and (i != 0):
                            # The first unit of a non-first stage downsamples.
                            unit = reduction_units[i - 1]
                            out_channels_list_per_stage = reduction_out_channels_list[i - 1]
                        else:
                            unit = normal_units[i]
                            out_channels_list_per_stage = normal_out_channels_list[i]
                        if (i == len(layers) - 1) and (j == layers_per_stage - 1):
                            # Special kwargs for the very last unit (semantics of
                            # `scale`/`activate` are defined in InceptionCUnit).
                            unit_kwargs = {"scale": 1.0, "activate": False}
                        else:
                            unit_kwargs = {}
                        stage.add(unit(
                            in_channels=in_channels,
                            out_channels_list=out_channels_list_per_stage,
                            bn_epsilon=bn_epsilon,
                            bn_use_global_stats=bn_use_global_stats,
                            **unit_kwargs))
                        if (j == 0) and (i != 0):
                            # Reduction unit changed the channel count.
                            in_channels = in_channels_list[i]
                self.features.add(stage)
            # Final feature map is averaged with an 8x8 window before the head.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = InceptHead(
                in_channels=in_channels,
                bn_epsilon=bn_epsilon,
                bn_use_global_stats=bn_use_global_stats,
                dropout_rate=dropout_rate,
                classes=classes)
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_inceptionresnetv1(model_name=None,
                          pretrained=False,
                          ctx=cpu(),
                          root=os.path.join("~", ".mxnet", "models"),
                          **kwargs):
    """
    Create InceptionResNetV1 model with specific parameters.
    Parameters:
    ----------
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    net = InceptionResNetV1(**kwargs)
    if not pretrained:
        return net
    # Pretrained weights were requested: a concrete model name is mandatory.
    if (model_name is None) or (not model_name):
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    weights_file = get_model_file(
        model_name=model_name,
        local_model_store_dir_path=root)
    net.load_parameters(
        filename=weights_file,
        ctx=ctx)
    return net
def inceptionresnetv1(**kwargs):
    """
    InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
    https://arxiv.org/abs/1602.07261.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # This variant uses an enlarged BatchNorm epsilon (1e-3).
    return get_inceptionresnetv1(
        model_name="inceptionresnetv1",
        bn_epsilon=1e-3,
        **kwargs)
def _test():
    """Smoke-test InceptionResNetV1: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    models = [
        inceptionresnetv1,
    ]
    ctx = mx.cpu()
    for model in models:
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != inceptionresnetv1 or weight_count == 23995624)
        y = net(mx.nd.zeros((1, 3, 299, 299), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 21,298 | 34.204959 | 117 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/scnet.py | """
SCNet for ImageNet-1K, implemented in Gluon.
Original paper: 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
"""
__all__ = ['SCNet', 'scnet50', 'scnet101', 'scneta50', 'scneta101']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, InterpolationBlock
from .resnet import ResInitBlock
from .senet import SEInitBlock
from .resnesta import ResNeStADownBlock
class ScDownBlock(HybridBlock):
    """
    SCNet specific convolutional downscale block: non-overlapping average
    pooling followed by a linear (no-activation) 3x3 convolution.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    pool_size : int or list/tuple of 2 ints, default 2
        Size of the average pooling windows.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 pool_size=2,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 **kwargs):
        super(ScDownBlock, self).__init__(**kwargs)
        with self.name_scope():
            # pool_size == strides: each window is disjoint, reducing the
            # spatial size exactly by the pool_size factor.
            self.pool = nn.AvgPool2D(
                pool_size=pool_size,
                strides=pool_size)
            # No activation here: the consumer (ScConv) adds an identity and
            # applies a sigmoid on top of this output.
            self.conv = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
    def hybrid_forward(self, F, x):
        x = self.pool(x)
        x = self.conv(x)
        return x
class ScConv(HybridBlock):
    """
    Self-calibrated convolutional block: calibration weights are computed from a
    downscaled-then-upsampled view of the input and gate a 3x3 convolution.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    scale_factor : int
        Scale factor.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_size : tuple of 2 int, default None
        Spatial size of output image for the upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 scale_factor,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_size=None,
                 **kwargs):
        super(ScConv, self).__init__(**kwargs)
        self.in_size = in_size
        with self.name_scope():
            # Context path: downscale by scale_factor, then upsample back
            # (nearest-neighbour, since bilinear=False).
            self.down = ScDownBlock(
                in_channels=in_channels,
                out_channels=out_channels,
                pool_size=scale_factor,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.up = InterpolationBlock(scale_factor=scale_factor, bilinear=False)
            self.sigmoid = nn.Activation("sigmoid")
            # Linear 3x3 conv whose output is gated by the calibration weights.
            self.conv1 = conv3x3_block(
                in_channels=in_channels,
                out_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
            self.conv2 = conv3x3_block(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
    def hybrid_forward(self, F, x):
        # NOTE(review): when in_size is None this reads x.shape, which needs a
        # concrete NDArray; pass in_size when hybridizing -- confirm.
        in_size = self.in_size if self.in_size is not None else x.shape[2:]
        # Calibration weights in (0, 1) from input plus its coarse context.
        w = self.sigmoid(x + self.up(self.down(x), in_size))
        x = self.conv1(x) * w
        x = self.conv2(x)
        return x
class ScBottleneck(HybridBlock):
    """
    SCNet specific bottleneck block for residual path in SCNet unit. Two
    parallel half-width branches (a plain conv branch and a self-calibrated
    branch) are concatenated and projected back to out_channels.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    bottleneck_factor : int, default 4
        Bottleneck factor.
    scale_factor : int, default 4
        Scale factor.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_size : tuple of 2 int, default None
        Spatial size of output image for the upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bottleneck_factor=4,
                 scale_factor=4,
                 avg_downsample=False,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_size=None,
                 **kwargs):
        super(ScBottleneck, self).__init__(**kwargs)
        # With avg_downsample, spatial reduction is deferred to an explicit
        # average pooling after both branches instead of strided convolutions.
        self.avg_resize = (strides > 1) and avg_downsample
        # Each of the two parallel branches carries half the bottleneck width.
        mid_channels = out_channels // bottleneck_factor // 2
        with self.name_scope():
            # Branch A: plain 1x1 -> 3x3 bottleneck.
            self.conv1a = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2a = conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=(1 if self.avg_resize else strides),
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            # Branch B: 1x1 -> self-calibrated convolution.
            # Fix: propagate the BatchNorm settings to this branch too;
            # previously conv1b/conv2b silently fell back to the defaults,
            # inconsistent with conv1a/conv2a/conv3 (no effect when the flags
            # keep their default False values).
            self.conv1b = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off)
            self.conv2b = ScConv(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=(1 if self.avg_resize else strides),
                scale_factor=scale_factor,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                in_size=in_size)
            if self.avg_resize:
                self.pool = nn.AvgPool2D(
                    pool_size=3,
                    strides=strides,
                    padding=1)
            # Fuse both branches; no activation here -- the enclosing ScUnit
            # applies ReLU after the residual addition.
            self.conv3 = conv1x1_block(
                in_channels=(2 * mid_channels),
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                activation=None)
    def hybrid_forward(self, F, x):
        y = self.conv1a(x)
        y = self.conv2a(y)
        z = self.conv1b(x)
        z = self.conv2b(z)
        if self.avg_resize:
            y = self.pool(y)
            z = self.pool(z)
        x = F.concat(y, z, dim=1)
        x = self.conv3(x)
        return x
class ScUnit(HybridBlock):
    """
    SCNet unit with residual connection: ScBottleneck body plus an identity
    shortcut, followed by ReLU.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    in_size : tuple of 2 int, default None
        Spatial size of output image for the upsampling operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 avg_downsample=False,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 in_size=None,
                 **kwargs):
        super(ScUnit, self).__init__(**kwargs)
        # The shortcut needs a projection whenever shape or stride changes.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)
        with self.name_scope():
            self.body = ScBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                avg_downsample=avg_downsample,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off,
                in_size=in_size)
            if self.resize_identity:
                if avg_downsample:
                    # Pool-then-project shortcut (ResNeSt-A style).
                    self.identity_block = ResNeStADownBlock(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats,
                        bn_cudnn_off=bn_cudnn_off)
                else:
                    # Classic strided 1x1 projection shortcut.
                    self.identity_block = conv1x1_block(
                        in_channels=in_channels,
                        out_channels=out_channels,
                        strides=strides,
                        bn_use_global_stats=bn_use_global_stats,
                        bn_cudnn_off=bn_cudnn_off,
                        activation=None)
            self.activ = nn.Activation("relu")
    def hybrid_forward(self, F, x):
        if self.resize_identity:
            identity = self.identity_block(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        x = self.activ(x)
        return x
class SCNet(HybridBlock):
    """
    SCNet model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    bn_cudnn_off : bool, default False
        Whether to disable CUDNN batch normalization operator.
    fixed_size : bool, default True
        Whether to expect fixed spatial size of input image.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 se_init_block=False,
                 avg_downsample=False,
                 bn_use_global_stats=False,
                 bn_cudnn_off=False,
                 fixed_size=True,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(SCNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        self.fixed_size = fixed_size
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            init_block_class = SEInitBlock if se_init_block else ResInitBlock
            self.features.add(init_block_class(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                bn_cudnn_off=bn_cudnn_off))
            in_channels = init_block_channels
            # The stem is assumed to reduce spatial size 4x (hence //4) --
            # confirm against ResInitBlock/SEInitBlock.
            in_size = (in_size[0] // 4, in_size[1] // 4)
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every non-first stage.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(ScUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            avg_downsample=avg_downsample,
                            bn_use_global_stats=bn_use_global_stats,
                            bn_cudnn_off=bn_cudnn_off,
                            in_size=in_size))
                        in_channels = out_channels
                        # Track the running spatial size for ScConv upsampling;
                        # unknown (None) when the input size is not fixed.
                        if strides > 1:
                            in_size = (in_size[0] // 2, in_size[1] // 2) if fixed_size else None
                self.features.add(stage)
            self.features.add(nn.GlobalAvgPool2D())
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_scnet(blocks,
              width_scale=1.0,
              se_init_block=False,
              avg_downsample=False,
              init_block_channels_scale=1,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create SCNet model with specific parameters.
    Parameters:
    ----------
    blocks : int
        Number of blocks.
    width_scale : float, default 1.0
        Scale factor for width of layers.
    se_init_block : bool, default False
        SENet-like initial block.
    avg_downsample : bool, default False
        Whether to use average downsampling.
    init_block_channels_scale : int, default 1
        Scale factor for number of output channels in the initial unit.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    # Network depth -> per-stage unit counts (each bottleneck unit = 3 convs).
    depth_to_layers = {
        14: [1, 1, 1, 1],
        26: [2, 2, 2, 2],
        38: [3, 3, 3, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    if blocks not in depth_to_layers:
        raise ValueError("Unsupported SCNet with number of blocks: {}".format(blocks))
    layers = depth_to_layers[blocks]
    assert (sum(layers) * 3 + 2 == blocks)
    init_block_channels = 64 * init_block_channels_scale
    bottleneck_factor = 4
    channels_per_layers = [ci * bottleneck_factor for ci in [64, 128, 256, 512]]
    channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
    if width_scale != 1.0:
        # Scale every unit width except the very last unit of the last stage.
        last_i = len(channels) - 1
        channels = [[cij if (i == last_i) and (j == len(ci) - 1) else int(cij * width_scale)
                     for j, cij in enumerate(ci)] for i, ci in enumerate(channels)]
        init_block_channels = int(init_block_channels * width_scale)
    net = SCNet(
        channels=channels,
        init_block_channels=init_block_channels,
        se_init_block=se_init_block,
        avg_downsample=avg_downsample,
        **kwargs)
    if not pretrained:
        return net
    if (model_name is None) or (not model_name):
        raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
    from .model_store import get_model_file
    net.load_parameters(
        filename=get_model_file(
            model_name=model_name,
            local_model_store_dir_path=root),
        ctx=ctx)
    return net
def scnet50(**kwargs):
    """
    SCNet-50 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_scnet(
        blocks=50,
        model_name="scnet50",
        **kwargs)
def scnet101(**kwargs):
    """
    SCNet-101 model from 'Improving Convolutional Networks with Self-Calibrated Convolutions,'
    http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_scnet(
        blocks=101,
        model_name="scnet101",
        **kwargs)
def scneta50(**kwargs):
    """
    SCNet(A)-50 with average downsampling model from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_scnet(
        blocks=50,
        se_init_block=True,
        avg_downsample=True,
        model_name="scneta50",
        **kwargs)
def scneta101(**kwargs):
    """
    SCNet(A)-101 with average downsampling model from 'Improving Convolutional Networks with Self-Calibrated
    Convolutions,' http://mftp.mmcheng.net/Papers/20cvprSCNet.pdf.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_scnet(
        blocks=101,
        se_init_block=True,
        avg_downsample=True,
        init_block_channels_scale=2,
        model_name="scneta101",
        **kwargs)
def _test():
    """Smoke-test every SCNet variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    fixed_size = True
    pretrained = False
    expected_counts = {
        scnet50: 25564584,
        scnet101: 44565416,
        scneta50: 25583816,
        scneta101: 44689192,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained, fixed_size=fixed_size)
        if not pretrained:
            net.initialize(ctx=ctx)
        net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        batch = 1
        x = mx.nd.random.normal(shape=(batch, 3, 224, 224), ctx=ctx)
        y = net(x)
        assert (y.shape == (batch, 1000))
if __name__ == "__main__":
    _test()
| 19,878 | 33.814361 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/igcv3.py | """
IGCV3 for ImageNet-1K, implemented in Gluon.
Original paper: 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
https://arxiv.org/abs/1806.00178.
"""
__all__ = ['IGCV3', 'igcv3_w1', 'igcv3_w3d4', 'igcv3_wd2', 'igcv3_wd4']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv1x1_block, conv3x3_block, dwconv3x3_block, ChannelShuffle, ReLU6
class InvResUnit(HybridBlock):
    """
    So-called 'Inverted Residual Unit' layer: grouped 1x1 expansion, channel
    shuffle, depthwise 3x3 (ReLU6), grouped 1x1 projection, with an identity
    shortcut when the shape is preserved.
    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the second convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    expansion : bool
        Whether do expansion of channels.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 bn_use_global_stats,
                 expansion,
                 **kwargs):
        super(InvResUnit, self).__init__(**kwargs)
        # Residual shortcut only when input and output shapes match exactly.
        self.residual = (in_channels == out_channels) and (strides == 1)
        mid_channels = in_channels * 6 if expansion else in_channels
        groups = 2
        with self.name_scope():
            # Grouped linear 1x1 expansion (no activation).
            self.conv1 = conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                groups=groups,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
            # Shuffle so the following layers mix information across groups.
            self.c_shuffle = ChannelShuffle(
                channels=mid_channels,
                groups=groups)
            # Depthwise 3x3 with ReLU6; carries the stride.
            self.conv2 = dwconv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                bn_use_global_stats=bn_use_global_stats,
                activation=ReLU6())
            # Grouped linear 1x1 projection (no activation).
            self.conv3 = conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                groups=groups,
                bn_use_global_stats=bn_use_global_stats,
                activation=None)
    def hybrid_forward(self, F, x):
        if self.residual:
            identity = x
        x = self.conv1(x)
        x = self.c_shuffle(x)
        x = self.conv2(x)
        x = self.conv3(x)
        if self.residual:
            x = x + identity
        return x
class IGCV3(HybridBlock):
    """
    IGCV3 model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    final_block_channels : int
        Number of output channels for the final block of the feature extractor.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 final_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(IGCV3, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # Stem: 3x3 stride-2 conv with ReLU6.
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                strides=2,
                bn_use_global_stats=bn_use_global_stats,
                activation=ReLU6()))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every non-first stage.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        # The very first unit of the network has no expansion.
                        expansion = (i != 0) or (j != 0)
                        stage.add(InvResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            expansion=expansion))
                        in_channels = out_channels
                self.features.add(stage)
            # Final 1x1 expansion to the classifier width.
            self.features.add(conv1x1_block(
                in_channels=in_channels,
                out_channels=final_block_channels,
                bn_use_global_stats=bn_use_global_stats,
                activation=ReLU6()))
            in_channels = final_block_channels
            # 7x7 average pooling (final map is 7x7 for a 224x224 input).
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_igcv3(width_scale,
              model_name=None,
              pretrained=False,
              ctx=cpu(),
              root=os.path.join("~", ".mxnet", "models"),
              **kwargs):
    """
    Create IGCV3-D model with specific parameters.
    Parameters:
    ----------
    width_scale : float
        Scale factor for width of layers.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    init_block_channels = 32
    final_block_channels = 1280
    layers = [1, 4, 6, 8, 6, 6, 1]
    downsample = [0, 1, 1, 1, 0, 1, 0]
    channels_per_layers = [16, 24, 32, 64, 96, 160, 320]
    # Group layers into stages: a downsampling layer opens a new stage, any
    # other layer extends the current one. (Replaces an equivalent but opaque
    # functools.reduce over a conditional lambda.)
    channels = [[]]
    for (channels_i, layers_i, downsample_i) in zip(channels_per_layers, layers, downsample):
        if downsample_i != 0:
            channels.append([channels_i] * layers_i)
        else:
            channels[-1] += [channels_i] * layers_i
    if width_scale != 1.0:
        def make_even(x):
            # Channel counts must stay even: InvResUnit splits them into 2 groups.
            return x if (x % 2 == 0) else x + 1
        channels = [[make_even(int(cij * width_scale)) for cij in ci] for ci in channels]
        init_block_channels = make_even(int(init_block_channels * width_scale))
        if width_scale > 1.0:
            final_block_channels = make_even(int(final_block_channels * width_scale))
    net = IGCV3(
        channels=channels,
        init_block_channels=init_block_channels,
        final_block_channels=final_block_channels,
        **kwargs)
    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)
    return net
def igcv3_w1(**kwargs):
    """
    IGCV3-D 1.0x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(
        width_scale=1.0,
        model_name="igcv3_w1",
        **kwargs)
def igcv3_w3d4(**kwargs):
    """
    IGCV3-D 0.75x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(
        width_scale=0.75,
        model_name="igcv3_w3d4",
        **kwargs)
def igcv3_wd2(**kwargs):
    """
    IGCV3-D 0.5x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(
        width_scale=0.5,
        model_name="igcv3_wd2",
        **kwargs)
def igcv3_wd4(**kwargs):
    """
    IGCV3-D 0.25x model from 'IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks,'
    https://arxiv.org/abs/1806.00178.
    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_igcv3(
        width_scale=0.25,
        model_name="igcv3_wd4",
        **kwargs)
def _test():
    """Smoke-test every IGCV3 variant: parameter count and output shape."""
    import numpy as np
    import mxnet as mx
    pretrained = False
    expected_counts = {
        igcv3_w1: 3491688,
        igcv3_w3d4: 2638084,
        igcv3_wd2: 1985528,
        igcv3_wd4: 1534020,
    }
    ctx = mx.cpu()
    for model, expected in expected_counts.items():
        net = model(pretrained=pretrained)
        if not pretrained:
            net.initialize(ctx=ctx)
        weight_count = 0
        for param in net.collect_params().values():
            if (param.shape is not None) and param._differentiable:
                weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected)
        y = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (y.shape == (1, 1000))
if __name__ == "__main__":
    _test()
| 11,243 | 33.280488 | 115 | py |
imgclsmob | imgclsmob-master/gluon/gluoncv2/models/seresnet_cifar.py | """
SE-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['CIFARSEResNet', 'seresnet20_cifar10', 'seresnet20_cifar100', 'seresnet20_svhn',
'seresnet56_cifar10', 'seresnet56_cifar100', 'seresnet56_svhn',
'seresnet110_cifar10', 'seresnet110_cifar100', 'seresnet110_svhn',
'seresnet164bn_cifar10', 'seresnet164bn_cifar100', 'seresnet164bn_svhn',
'seresnet272bn_cifar10', 'seresnet272bn_cifar100', 'seresnet272bn_svhn',
'seresnet542bn_cifar10', 'seresnet542bn_cifar100', 'seresnet542bn_svhn',
'seresnet1001_cifar10', 'seresnet1001_cifar100', 'seresnet1001_svhn',
'seresnet1202_cifar10', 'seresnet1202_cifar100', 'seresnet1202_svhn']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from .common import conv3x3_block
from .seresnet import SEResUnit
class CIFARSEResNet(HybridBlock):
    """
    SE-ResNet model for CIFAR from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
    Parameters:
    ----------
    channels : list of list of int
        Number of output channels for each unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (32, 32)
        Spatial size of the expected input image.
    classes : int, default 10
        Number of classification classes.
    """
    def __init__(self,
                 channels,
                 init_block_channels,
                 bottleneck,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(32, 32),
                 classes=10,
                 **kwargs):
        super(CIFARSEResNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            # CIFAR stem: a single 3x3 conv, no downscaling (inputs are 32x32).
            self.features.add(conv3x3_block(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels
            for i, channels_per_stage in enumerate(channels):
                stage = nn.HybridSequential(prefix="stage{}_".format(i + 1))
                with stage.name_scope():
                    for j, out_channels in enumerate(channels_per_stage):
                        # Downsample at the first unit of every non-first stage.
                        strides = 2 if (j == 0) and (i != 0) else 1
                        stage.add(SEResUnit(
                            in_channels=in_channels,
                            out_channels=out_channels,
                            strides=strides,
                            bn_use_global_stats=bn_use_global_stats,
                            bottleneck=bottleneck,
                            conv1_stride=False))
                        in_channels = out_channels
                self.features.add(stage)
            # 32x32 input and two 2x downscales leave an 8x8 map: pool it away.
            self.features.add(nn.AvgPool2D(
                pool_size=8,
                strides=1))
            self.output = nn.HybridSequential(prefix="")
            self.output.add(nn.Flatten())
            self.output.add(nn.Dense(
                units=classes,
                in_units=in_channels))
    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_seresnet_cifar(classes,
                       blocks,
                       bottleneck,
                       model_name=None,
                       pretrained=False,
                       ctx=cpu(),
                       root=os.path.join("~", ".mxnet", "models"),
                       **kwargs):
    """
    Create SE-ResNet model for CIFAR with specific parameters.

    Parameters:
    ----------
    classes : int
        Number of classification classes.
    blocks : int
        Number of blocks.
    bottleneck : bool
        Whether to use a bottleneck or simple block in units.
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    assert (classes in [10, 100])

    # Depth must satisfy blocks = 9 * n + 2 for bottleneck units or
    # blocks = 6 * n + 2 for simple units, with n units in each of 3 stages.
    divisor = 9 if bottleneck else 6
    assert ((blocks - 2) % divisor == 0)
    layers = [(blocks - 2) // divisor] * 3

    init_block_channels = 16
    # Bottleneck units expand the base widths (16, 32, 64) four-fold.
    expansion = 4 if bottleneck else 1
    channels = [[ci * expansion] * li for (ci, li) in zip([16, 32, 64], layers)]

    net = CIFARSEResNet(
        channels=channels,
        init_block_channels=init_block_channels,
        bottleneck=bottleneck,
        classes=classes,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def seresnet20_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-20 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=20, bottleneck=False, classes=classes,
                              model_name="seresnet20_cifar10", **kwargs)


def seresnet20_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-20 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=20, bottleneck=False, classes=classes,
                              model_name="seresnet20_cifar100", **kwargs)


def seresnet20_svhn(classes=10, **kwargs):
    """
    SE-ResNet-20 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=20, bottleneck=False, classes=classes,
                              model_name="seresnet20_svhn", **kwargs)
def seresnet56_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-56 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=56, bottleneck=False, classes=classes,
                              model_name="seresnet56_cifar10", **kwargs)


def seresnet56_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-56 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=56, bottleneck=False, classes=classes,
                              model_name="seresnet56_cifar100", **kwargs)


def seresnet56_svhn(classes=10, **kwargs):
    """
    SE-ResNet-56 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=56, bottleneck=False, classes=classes,
                              model_name="seresnet56_svhn", **kwargs)
def seresnet110_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-110 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=110, bottleneck=False, classes=classes,
                              model_name="seresnet110_cifar10", **kwargs)


def seresnet110_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-110 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=110, bottleneck=False, classes=classes,
                              model_name="seresnet110_cifar100", **kwargs)


def seresnet110_svhn(classes=10, **kwargs):
    """
    SE-ResNet-110 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=110, bottleneck=False, classes=classes,
                              model_name="seresnet110_svhn", **kwargs)
def seresnet164bn_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-164(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=164, bottleneck=True, classes=classes,
                              model_name="seresnet164bn_cifar10", **kwargs)


def seresnet164bn_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-164(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=164, bottleneck=True, classes=classes,
                              model_name="seresnet164bn_cifar100", **kwargs)


def seresnet164bn_svhn(classes=10, **kwargs):
    """
    SE-ResNet-164(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=164, bottleneck=True, classes=classes,
                              model_name="seresnet164bn_svhn", **kwargs)
def seresnet272bn_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-272(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=272, bottleneck=True, classes=classes,
                              model_name="seresnet272bn_cifar10", **kwargs)


def seresnet272bn_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-272(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=272, bottleneck=True, classes=classes,
                              model_name="seresnet272bn_cifar100", **kwargs)


def seresnet272bn_svhn(classes=10, **kwargs):
    """
    SE-ResNet-272(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=272, bottleneck=True, classes=classes,
                              model_name="seresnet272bn_svhn", **kwargs)
def seresnet542bn_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-542(BN) model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=542, bottleneck=True, classes=classes,
                              model_name="seresnet542bn_cifar10", **kwargs)


def seresnet542bn_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-542(BN) model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=542, bottleneck=True, classes=classes,
                              model_name="seresnet542bn_cifar100", **kwargs)


def seresnet542bn_svhn(classes=10, **kwargs):
    """
    SE-ResNet-542(BN) model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=542, bottleneck=True, classes=classes,
                              model_name="seresnet542bn_svhn", **kwargs)
def seresnet1001_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-1001 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1001, bottleneck=True, classes=classes,
                              model_name="seresnet1001_cifar10", **kwargs)


def seresnet1001_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-1001 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1001, bottleneck=True, classes=classes,
                              model_name="seresnet1001_cifar100", **kwargs)


def seresnet1001_svhn(classes=10, **kwargs):
    """
    SE-ResNet-1001 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1001, bottleneck=True, classes=classes,
                              model_name="seresnet1001_svhn", **kwargs)
def seresnet1202_cifar10(classes=10, **kwargs):
    """
    SE-ResNet-1202 model for CIFAR-10 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1202, bottleneck=False, classes=classes,
                              model_name="seresnet1202_cifar10", **kwargs)


def seresnet1202_cifar100(classes=100, **kwargs):
    """
    SE-ResNet-1202 model for CIFAR-100 from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 100
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1202, bottleneck=False, classes=classes,
                              model_name="seresnet1202_cifar100", **kwargs)


def seresnet1202_svhn(classes=10, **kwargs):
    """
    SE-ResNet-1202 model for SVHN from 'Squeeze-and-Excitation Networks,'
    https://arxiv.org/abs/1709.01507.

    Parameters:
    ----------
    classes : int, default 10
        Number of classification classes.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_seresnet_cifar(blocks=1202, bottleneck=False, classes=classes,
                              model_name="seresnet1202_svhn", **kwargs)
def _test():
    """Smoke-test every constructor: check parameter counts and output shapes."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    # (constructor, number of classes, expected trainable parameter count).
    # SVHN variants share architectures (and counts) with their CIFAR-10 twins.
    models = [
        (seresnet20_cifar10, 10, 274847),
        (seresnet20_cifar100, 100, 280697),
        (seresnet20_svhn, 10, 274847),
        (seresnet56_cifar10, 10, 862889),
        (seresnet56_cifar100, 100, 868739),
        (seresnet56_svhn, 10, 862889),
        (seresnet110_cifar10, 10, 1744952),
        (seresnet110_cifar100, 100, 1750802),
        (seresnet110_svhn, 10, 1744952),
        (seresnet164bn_cifar10, 10, 1906258),
        (seresnet164bn_cifar100, 100, 1929388),
        (seresnet164bn_svhn, 10, 1906258),
        (seresnet272bn_cifar10, 10, 3153826),
        (seresnet272bn_cifar100, 100, 3176956),
        (seresnet272bn_svhn, 10, 3153826),
        (seresnet542bn_cifar10, 10, 6272746),
        (seresnet542bn_cifar100, 100, 6295876),
        (seresnet542bn_svhn, 10, 6272746),
        (seresnet1001_cifar10, 10, 11574910),
        (seresnet1001_cifar100, 100, 11598040),
        (seresnet1001_svhn, 10, 11574910),
        (seresnet1202_cifar10, 10, 19582226),
        (seresnet1202_cifar100, 100, 19588076),
        (seresnet1202_svhn, 10, 19582226),
    ]

    for model, classes, expected_count in models:
        net = model(pretrained=pretrained)

        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)

        # net.hybridize()
        weight_count = 0
        for param in net.collect_params().values():
            # Skip parameters without a shape or that are frozen.
            if (param.shape is None) or (not param._differentiable):
                continue
            weight_count += np.prod(param.shape)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (weight_count == expected_count)

        # Forward a dummy CIFAR-sized batch and verify the logits shape.
        x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)
        y = net(x)
        assert (y.shape == (1, classes))
# Run the smoke test when this module is executed directly.
if __name__ == "__main__":
    _test()
| 25,848 | 36.846266 | 120 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.