parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    help='weight decay (default: 1e-4)')
parser.add_argument('--gamma',
                    default=0.1,
                    type=float,
                    help='learning rate multiplier')
parser.add_argument('--input-size',
                    default=112,
                    type=int,
                    help='input size (default: 112x112)')
parser.add_argument('--feature-dim',
                    default=256,
                    type=int,
                    metavar='D',
                    help='feature dimension (default: 256)')
parser.add_argument('--num-classes',
                    default=1000,
                    type=int,
                    metavar='N',
                    help='number of classes (default: 1000)')
parser.add_argument('--sample-num',
                    default=1000,
                    type=int,
                    help='number of classes to sample from all classes (default: 1000)')
parser.add_argument('--print-freq',
                    default=100,
                    type=int,
                    help='logging frequency in iterations (default: 100)')
parser.add_argument('--resume',
                    default='',
                    type=str,
                    metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('--save-path',
                    default='checkpoints/ckpt',
                    type=str,
                    help='path to store checkpoints (default: checkpoints/ckpt)')
parser.add_argument('-e',
                    '--evaluate',
                    dest='evaluate',
                    action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--sampled',
                    dest='sampled',
                    action='store_true',
                    help='sample classes from the full softmax')
parser.add_argument('--classifier-type',
                    default='linear',
                    choices=classifier_types,
                    help='type of classifier to use')
parser.add_argument('--distributed',
                    dest='distributed',
                    action='store_true',
                    help='distributed training')
parser.add_argument('--dist-addr',
                    default='127.0.0.1',
                    type=str,
                    help='distributed address')
parser.add_argument('--dist-port',
                    default='23456',
                    type=str,
                    help='distributed port')
parser.add_argument('--dist-backend',
                    default='nccl',
                    type=str,
                    help='distributed backend')
parser.add_argument('--tmp-client-id',
                    default=9999,
                    type=int,
                    help='temporary client id used to communicate with the parameter server')
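
# For reference, a hypothetical launch command using the flags above (the
# script name and values are illustrative, not taken from the original repo):
#
#   python main.py --distributed --dist-addr 127.0.0.1 --dist-port 23456 \
#       --num-classes 1000 --sampled --sample-num 1000 \
#       --classifier-type linear --save-path checkpoints/ckpt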
best_prec1 = 0
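
# `init_processes`, `mkdir_if_no_exist`, and `create_logger` are assumed to be
# imported from a utils module earlier in this file; their implementations are
# not shown in this section. Purely as a sketch, `init_processes` likely wraps
# torch.distributed.init_process_group with a tcp:// rendezvous. Below is an
# illustrative stand-in (hypothetical name, signature inferred from the call
# in main()); the real helper may differ:
def _init_processes_sketch(addr, port, gpu_num, backend):
    import os
    import torch.distributed as dist
    # rank/world size are usually provided by the launcher's environment
    rank = int(os.environ.get('RANK', '0'))
    world_size = int(os.environ.get('WORLD_SIZE', str(gpu_num)))
    dist.init_process_group(backend=backend,
                            init_method='tcp://{}:{}'.format(addr, port),
                            rank=rank,
                            world_size=world_size)
    # pin each process to one local GPU (common pattern; the repo may differ)
    torch.cuda.set_device(rank % gpu_num)
    return rank, world_size
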
def main():
    global args, best_prec1
    args = parser.parse_args()

    # init distributed training
    gpu_num = torch.cuda.device_count()
    if args.distributed:
        args.rank, args.world_size = init_processes(args.dist_addr,
                                                    args.dist_port, gpu_num,
                                                    args.dist_backend)
        print("=> using {} GPUs for distributed training".format(
            args.world_size))
    else:
        args.rank = 0
        print("=> using {} GPUs for training".format(gpu_num))

    # create logger (only rank 0 writes tensorboard events and log files)
    if args.rank == 0:
        mkdir_if_no_exist(args.save_path,
                          subdirs=['events/', 'logs/', 'checkpoints/'])
        tb_logger = SummaryWriter('{}/events'.format(args.save_path))
        logger = create_logger('global_logger',
                               '{}/logs/log.txt'.format(args.save_path))
        logger.debug(args)  # log args only to file
    else:
        tb_logger = None
        logger = None
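    # NOTE: non-zero ranks keep tb_logger/logger as None, so later logging in
    # this function presumably guards on args.rank == 0 (the remainder of
    # main() is not shown in this section).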