    parser.add_argument('--recount', type=int, default=1,
                        help='Random erase count (default: 1)')
    parser.add_argument('--resplit', action='store_true', default=False,
                        help='Do not random erase first (clean) augmentation split')

    # * Mixup params
    parser.add_argument('--mixup', type=float, default=0,
                        help='mixup alpha, mixup enabled if > 0.')
    parser.add_argument('--cutmix', type=float, default=0,
                        help='cutmix alpha, cutmix enabled if > 0.')
    parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
                        help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
    parser.add_argument('--mixup_prob', type=float, default=1.0,
                        help='Probability of performing mixup or cutmix when either/both is enabled')
    parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
                        help='Probability of switching to cutmix when both mixup and cutmix are enabled')
    parser.add_argument('--mixup_mode', type=str, default='batch',
                        help='How to apply mixup/cutmix params: per "batch", "pair", or "elem"')
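
    # NOTE (illustrative sketch, not part of this script): downstream these flags are
    # commonly forwarded to timm.data.Mixup; the exact wiring below is an assumption
    # about typical usage, with the class count taken from --nb_classes:
    #   mixup_fn = Mixup(
    #       mixup_alpha=args.mixup, cutmix_alpha=args.cutmix,
    #       cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob,
    #       switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
    #       num_classes=args.nb_classes)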

    # * Finetuning params
    parser.add_argument('--finetune', default='',
                        help='finetune from checkpoint')
    parser.add_argument('--global_pool', action='store_true')
    parser.set_defaults(global_pool=True)
    parser.add_argument('--cls_token', action='store_false', dest='global_pool',
                        help='Use class token instead of global pool for classification')
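    # NOTE (descriptive): --global_pool and --cls_token write to the same
    # destination ('global_pool', default True), so passing --cls_token flips
    # args.global_pool to False and the class token is used for classification.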

    # Dataset parameters
    parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
                        help='dataset path')
    parser.add_argument('--nb_classes', default=1000, type=int,
                        help='number of classification classes')

    parser.add_argument('--output_dir', default='./output_dir',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default='./output_dir',
                        help='path where to write TensorBoard logs')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='',
                        help='resume from checkpoint')

    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true',
                        help='Perform evaluation only')
    parser.add_argument('--dist_eval', action='store_true', default=False,
                        help='Enable distributed evaluation (recommended during training for faster monitoring)')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    return parser
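
# NOTE (illustrative sketch, not part of this excerpt): the parser returned above
# is typically consumed by a thin entry point along these lines; the factory name
# get_args_parser() is an assumption, since its definition lies outside this excerpt:
#   if __name__ == '__main__':
#       args = get_args_parser().parse_args()
#       main(args)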

def main(args):
    misc.init_distributed_mode(args)

    print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__))))
    print("{}".format(args).replace(', ', ',\n'))

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + misc.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
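    # Offsetting the base seed by the process rank gives each distributed worker
    # its own, still reproducible, random stream (e.g. for shuffling and augmentation).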

    cudnn.benchmark = True

    dataset_train = build_dataset(is_train=True, args=args)
    dataset_val = build_dataset(is_train=False, args=args)

    if True:  # args.distributed:
        num_tasks = misc.get_world_size()
        global_rank = misc.get_rank()

        sampler_train = torch.utils.data.DistributedSampler(
            dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
        )
        print("Sampler_train = %s" % str(sampler_train))
        if args.dist_eval:
            if len(dataset_val) % num_tasks != 0:
                print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
                      'This will slightly alter validation results as extra duplicate entries are added to achieve '
                      'equal num of samples per-process.')
            sampler_val = torch.utils.data.DistributedSampler(
                dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=True)  # shuffle=True to reduce monitor bias
        else:
            sampler_val = torch.utils.data.SequentialSampler(dataset_val)
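    # NOTE (descriptive, numbers are illustrative): DistributedSampler pads the
    # dataset so every rank draws the same number of samples, e.g. a 1,003-image
    # val set on 4 ranks is padded with 1 duplicate entry to 1,004 (251 per rank),
    # which is what the warning above refers to.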