# NOTE: dataset-viewer table header ("text stringlengths 1 93.6k") — scraping
# artifact, not part of the original source file.
# Image preprocessing pipelines, keyed by split name (source / target /
# evaluation).  The two training splits share random augmentation
# (flip + random crop); the evaluation split is deterministic
# (center crop) so reported metrics are reproducible.
# FIX: torchvision renamed the deprecated ``Scale`` transform to
# ``Resize``; ``Scale`` has been removed from modern torchvision, so the
# original ``transforms.Scale((256, 256))`` calls no longer run.
data_transforms = {
    source_data: transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        # ImageNet channel statistics (mean, std) — standard for
        # ImageNet-pretrained backbones.
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    target_data: transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    evaluation_data: transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
}
|
# Query CUDA availability once so device placement can branch on it.
use_gpu = torch.cuda.is_available()

# Build the three dataloaders plus the target dataset handle.
# ``return_id=True`` presumably makes the loaders yield sample indices
# alongside images — TODO confirm against get_loader; the memory bank
# below is sized by the target dataset length.
(source_loader, target_loader,
 test_loader, target_folder) = get_loader(
    source_data,
    target_data,
    evaluation_data,
    data_transforms,
    batch_size=batch_size,
    return_id=True,
    balanced=conf.data.dataloader.class_balance,
)
dataset_test = test_loader

# Source label space = classes shared with the target plus
# source-private classes.
n_share = conf.data.dataset.n_share
n_source_private = conf.data.dataset.n_source_private
num_class = n_share + n_source_private
|
# Feature generator G and classifier C1 for the chosen backbone.
G, C1 = get_model_mme(conf.model.base_model, num_class=num_class,
                      temp=conf.model.temp)

# FIX: the original hard-coded ``torch.device("cuda")`` (crashing on
# CPU-only hosts even though ``use_gpu`` was computed above) and moved
# the models twice: ``.cuda()`` behind an ``args.cuda`` guard followed
# by an unconditional ``.to(device)``.  One ``.to(device)`` suffices.
device = torch.device("cuda" if use_gpu else "cpu")
G.to(device)
C1.to(device)

## Memory
# Non-parametric memory bank with one 2048-dim slot per target sample.
# ``.to(device)`` replaces the original unconditional ``.cuda()`` —
# assumes LinearAverage is an nn.Module (TODO confirm).
ndata = len(target_folder)
lemniscate = LinearAverage(2048, ndata, conf.model.temp,
                           conf.train.momentum).to(device)
|
# Per-parameter optimizer groups for G.  Backbone ("features")
# parameters train at the scaled rate ``conf.train.multi``; all other
# parameters (e.g. newly added layers) train at lr=1.0.
# FIX: the original had separate bias / non-bias branches that built
# byte-identical dicts (dead duplication), iterated a needless
# ``dict(...)`` copy of ``named_parameters()``, and also appended
# frozen (``requires_grad=False``) parameters to the optimizer; those
# are skipped here.
params = []
for key, value in G.named_parameters():
    if not value.requires_grad:
        continue
    lr = conf.train.multi if "features" in key else 1.0
    params.append({
        "params": [value],
        "lr": lr,
        "weight_decay": conf.train.weight_decay,
    })
|
# Classification loss and the two SGD optimizers: G uses the
# per-parameter groups assembled above, C1 trains uniformly at lr=1.0.
criterion = torch.nn.CrossEntropyLoss().cuda()
opt_g = optim.SGD(params,
                  momentum=conf.train.sgd_momentum,
                  weight_decay=0.0005,
                  nesterov=True)
opt_c1 = optim.SGD(list(C1.parameters()),
                   lr=1.0,
                   momentum=conf.train.sgd_momentum,
                   weight_decay=0.0005,
                   nesterov=True)

# Wrap models/optimizers for mixed precision (NVIDIA apex, level O1),
# then enable multi-GPU data parallelism.
[G, C1], [opt_g, opt_c1] = amp.initialize([G, C1], [opt_g, opt_c1],
                                          opt_level="O1")
G = nn.DataParallel(G)
C1 = nn.DataParallel(C1)

# Snapshot every group's base learning rate so the inv-decay scheduler
# can rescale from the original value at each step.
param_lr_g = [group["lr"] for group in opt_g.param_groups]
param_lr_f = [group["lr"] for group in opt_c1.param_groups]
|
def train():
|
criterion = nn.CrossEntropyLoss().cuda()
|
print('train start!')
|
data_iter_s = iter(source_loader)
|
data_iter_t = iter(target_loader)
|
len_train_source = len(source_loader)
|
len_train_target = len(target_loader)
|
for step in range(conf.train.min_step + 1):
|
G.train()
|
C1.train()
|
if step % len_train_target == 0:
|
data_iter_t = iter(target_loader)
|
if step % len_train_source == 0:
|
data_iter_s = iter(source_loader)
|
data_t = next(data_iter_t)
|
data_s = next(data_iter_s)
|
inv_lr_scheduler(param_lr_g, opt_g, step,
|
init_lr=conf.train.lr,
|
# NOTE: site-footer text ("Subsets and Splits / No community queries yet /
# The top public SQL queries from the community will appear here once
# available.") — scraping artifact; the ``train()`` definition above is
# truncated in this capture.