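# Per-GPU worker setup for distributed training of a DeepLab-style segmentation
# model. Assumed context (not shown in this fragment): this code runs inside a
# per-process worker launched with torch.multiprocessing.spawn, so `gpu`,
# `world_size`, `dist_url`, `args`, and `logger` come from the enclosing
# function, and `resnet_feature_extractor`, `ASPP_Classifier_Gen`, and
# `create_dataset` are project-local helpers imported elsewhere. Using the GPU
# index as the process rank assumes single-node training.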
print("gpu: {}, world_size: {}".format(gpu, world_size))
print("dist_url: ", dist_url)
torch.cuda.set_device(gpu)
args.batch_size = args.batch_size // world_size
args.batch_size_val = args.batch_size_val // world_size
args.num_workers = args.num_workers // world_size
dist.init_process_group(backend='nccl', init_method=dist_url, world_size=world_size, rank=gpu)
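# Only rank 0 logs, so output is not duplicated once per process.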
if gpu == 0:
    logger.info("args.batch_size: {}, args.batch_size_val: {}".format(args.batch_size, args.batch_size_val))
device = torch.device("cuda" if not args.cpu else "cpu")
args.world_size = world_size
if gpu == 0:
    logger.info("args: {}".format(args))
# cudnn.enabled = True
# Create network
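# The network is assembled as four separately wrapped pieces: a backbone split
# (model_B2 = early layers, model_B = remaining layers) plus an ASPP `head` and
# final `classifier`. When resuming, each piece has its own state dict in the
# checkpoint, and the "module." prefix added by DistributedDataParallel is
# stripped so the weights load into the unwrapped modules.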
if args.model == 'DeepLab':
    if args.resume:
        resume_weight = torch.load(args.resume, map_location='cpu')
        print("args.resume: ", args.resume)
        # feature_extractor_weights = resume_weight['model_state_dict']
        model_B2_weights = resume_weight['model_B2_state_dict']
        model_B_weights = resume_weight['model_B_state_dict']
        head_weights = resume_weight['head_state_dict']
        classifier_weights = resume_weight['classifier_state_dict']
        # feature_extractor_weights = {k.replace("module.", ""): v for k, v in feature_extractor_weights.items()}
        model_B2_weights = {k.replace("module.", ""): v for k, v in model_B2_weights.items()}
        model_B_weights = {k.replace("module.", ""): v for k, v in model_B_weights.items()}
        head_weights = {k.replace("module.", ""): v for k, v in head_weights.items()}
        classifier_weights = {k.replace("module.", ""): v for k, v in classifier_weights.items()}
    if gpu == 0:
        logger.info("freeze_bn: {}".format(args.freeze_bn))
    model = resnet_feature_extractor('resnet101', 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', freeze_bn=args.freeze_bn)
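    # Split the ResNet-101 backbone after the stem (layer 0), layer1, or layer2,
    # depending on args.layer; ndf is the channel width at the split point.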
    if args.layer == 0:
        ndf = 64
        model_B2 = nn.Sequential(model.backbone.conv1, model.backbone.bn1, model.backbone.relu, model.backbone.maxpool)
        model_B = nn.Sequential(model.backbone.layer1, model.backbone.layer2, model.backbone.layer3, model.backbone.layer4)
    elif args.layer == 1:
        ndf = 256
        model_B2 = nn.Sequential(model.backbone.conv1, model.backbone.bn1, model.backbone.relu, model.backbone.maxpool, model.backbone.layer1)
        model_B = nn.Sequential(model.backbone.layer2, model.backbone.layer3, model.backbone.layer4)
    elif args.layer == 2:
        ndf = 512
        model_B2 = nn.Sequential(model.backbone.conv1, model.backbone.bn1, model.backbone.relu, model.backbone.maxpool, model.backbone.layer1, model.backbone.layer2)
        model_B = nn.Sequential(model.backbone.layer3, model.backbone.layer4)
    if args.resume:
        model_B2.load_state_dict(model_B2_weights)
        model_B.load_state_dict(model_B_weights)
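    # ASPP decoder on the 2048-channel ResNet-101 output, with dilation and
    # padding series (6, 12, 18, 24); it is split into an intermediate `head`
    # and the final `classifier` so the two parts can be wrapped and updated
    # separately.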
    classifier = ASPP_Classifier_Gen(2048, [6, 12, 18, 24], [6, 12, 18, 24], args.num_classes, hidden_dim=args.hidden_dim)
    head, classifier = classifier.head, classifier.classifier
    if args.resume:
        head.load_state_dict(head_weights)
        classifier.load_state_dict(classifier_weights)
    model_B2.train()
    model_B.train()
    head.train()
    classifier.train()
    if gpu == 0:
        logger.info(model_B2)
        logger.info(model_B)
        logger.info(head)
        logger.info(classifier)
    else:
        logger = None
if gpu == 0:
    logger.info("args.noaug: {}, args.resize: {}, args.rcrop: {}, args.hflip: {}, args.noshuffle: {}, args.no_droplast: {}".format(args.noaug, args.resize, args.rcrop, args.hflip, args.noshuffle, args.no_droplast))
args.rcrop = [int(x.strip()) for x in args.rcrop.split(",")]
args.clrjit_params = [float(x) for x in args.clrjit_params.split(',')]
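# Build the source/target datasets and wrap their loaders in enumerate() so the
# step-based training loop can pull batches with next().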
datasets = create_dataset(args, logger)
sourceloader_iter = enumerate(datasets.source_train_loader)
targetloader_iter = enumerate(datasets.target_train_loader)
# Wrap each model piece for distributed training: convert BatchNorm layers to
# SyncBatchNorm (so statistics are synchronized across GPUs), then wrap in
# DistributedDataParallel on this process's device.
model_B2 = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_B2)
model_B2 = torch.nn.parallel.DistributedDataParallel(model_B2.cuda(), device_ids=[gpu], find_unused_parameters=True)
model_B = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model_B)
model_B = torch.nn.parallel.DistributedDataParallel(model_B.cuda(), device_ids=[gpu], find_unused_parameters=True)
head = torch.nn.SyncBatchNorm.convert_sync_batchnorm(head)
head = torch.nn.parallel.DistributedDataParallel(head.cuda(), device_ids=[gpu], find_unused_parameters=True)
classifier = torch.nn.SyncBatchNorm.convert_sync_batchnorm(classifier)
classifier = torch.nn.parallel.DistributedDataParallel(classifier.cuda(), device_ids=[gpu], find_unused_parameters=True)
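# The segmentation loss skips the void label; `interp` upsamples logits back to
# the training crop size (args.rcrop appears to be stored as (W, H), hence the
# index swap below).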
seg_loss = torch.nn.CrossEntropyLoss(ignore_index=args.ignore_label)
interp = nn.Upsample(size=(args.rcrop[1], args.rcrop[0]), mode='bilinear', align_corners=True)
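# Illustrative sketch (an assumption, not part of the original script) of how
# one supervised source step would use the pieces defined above; the batch
# layout (images, labels, ...) is assumed from create_dataset's loaders.
#
#   _, src_batch = next(sourceloader_iter)
#   src_images = src_batch[0].cuda(gpu)
#   src_labels = src_batch[1].long().cuda(gpu)
#   logits = classifier(head(model_B(model_B2(src_images))))  # backbone -> ASPP -> logits
#   loss = seg_loss(interp(logits), src_labels)               # upsample, then CE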