import torch
import torch.nn as nn

# Download locations for pretrained weights. The excerpt starts partway through
# this dict; entries before 'resnet50' are not included here.
model_urls = {
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
    'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
    'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
    'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
    'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
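
# A minimal sketch (not part of the original source) of how one of these URLs
# could be used: torch.hub.load_state_dict_from_url downloads and caches the
# checkpoint and returns an ordinary state dict. The full ResNet constructor is
# not part of this excerpt, so only the download step is illustrated; the helper
# name below is hypothetical.
def _load_pretrained_state_dict(arch='resnet50', progress=True):
    """Illustrative helper: fetch the pretrained state dict for `arch`."""
    from torch.hub import load_state_dict_from_url
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    # The caller would typically pass this to model.load_state_dict(state_dict).
    return state_dict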

def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, groups=groups, bias=False, dilation=dilation)


def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)

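# Illustrative shape check (not in the original source): because padding is set
# equal to dilation, conv3x3 preserves spatial size at stride 1 and halves it at
# stride 2, which is what the residual blocks below rely on.
def _conv_shape_demo():
    x = torch.randn(1, 64, 56, 56)
    assert conv3x3(64, 64)(x).shape == (1, 64, 56, 56)              # stride 1: same size
    assert conv3x3(64, 128, stride=2)(x).shape == (1, 128, 28, 28)  # stride 2: halved
    assert conv1x1(64, 256)(x).shape == (1, 256, 56, 56)            # 1x1: channels only
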
class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
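
# Illustrative usage (not in the original source): when a BasicBlock changes
# resolution or channel count, the caller must supply a matching `downsample`
# projection (typically conv1x1 + norm) so that `identity` and `out` have the
# same shape before they are added.
def _basic_block_demo():
    downsample = nn.Sequential(conv1x1(64, 128, stride=2), nn.BatchNorm2d(128))
    block = BasicBlock(64, 128, stride=2, downsample=downsample)
    x = torch.randn(1, 64, 56, 56)
    assert block(x).shape == (1, 128, 28, 28)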

class Bottleneck(nn.Module):
    # Bottleneck in torchvision places the stride for downsampling at the 3x3
    # convolution (self.conv2), while the original implementation from
    # "Deep Residual Learning for Image Recognition" (https://arxiv.org/abs/1512.03385)
    # places the stride at the first 1x1 convolution (self.conv1).
    # This variant is also known as ResNet V1.5 and improves accuracy according to
    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(Bottleneck, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        width = int(planes * (base_width / 64.)) * groups
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, width)
        self.bn1 = norm_layer(width)
        self.conv2 = conv3x3(width, width, stride, groups, dilation)
        self.bn2 = norm_layer(width)
        self.conv3 = conv1x1(width, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # Add the shortcut (identity, or the downsampled input) and apply the
        # final ReLU, exactly as in BasicBlock.forward.
        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
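
# Illustrative usage (not in the original source): a Bottleneck expands its
# output to planes * expansion channels, so the shortcut projection must be
# sized accordingly; in the full ResNet builder (not part of this excerpt) that
# projection is constructed automatically.
def _bottleneck_demo():
    downsample = nn.Sequential(conv1x1(256, 128 * Bottleneck.expansion, stride=2),
                               nn.BatchNorm2d(128 * Bottleneck.expansion))
    block = Bottleneck(256, 128, stride=2, downsample=downsample)
    x = torch.randn(1, 256, 56, 56)
    assert block(x).shape == (1, 512, 28, 28)  # 128 * expansion (4) = 512 channels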