text stringlengths 1 93.6k |
|---|
n_g_nets=1,
|
final_bottleneck_dim=0,
|
# logging
|
log_step=10,
|
# others
|
save_dir='./checkpoints',
|
):
|
logger = PythonLogger()
|
logger.log('preparing train loader...')
|
tr_loader = get_imagenet_dataloader(train_root,
|
batch_size=batch_size,
|
train=True)
|
logger.log('preparing val loader...')
|
val_loaders = {}
|
val_loaders['biased'] = get_imagenet_dataloader(val_root,
|
batch_size=batch_size,
|
train=False)
|
val_loaders['unbiased'] = get_imagenet_dataloader(val_root,
|
batch_size=batch_size,
|
train=False)
|
val_loaders['imagenet-a'] = get_imagenet_dataloader(imageneta_root,
|
batch_size=batch_size,
|
train=False,
|
val_data='ImageNet-A')
|
logger.log('preparing trainer...')
|
if scheduler == 'StepLR':
|
f_scheduler_config = {'step_size': lr_step_size}
|
g_scheduler_config = {'step_size': lr_step_size}
|
elif scheduler == 'CosineAnnealingLR':
|
f_scheduler_config = {'T_max': n_epochs}
|
g_scheduler_config = {'T_max': n_epochs}
|
else:
|
raise NotImplementedError
|
if outer_criterion == 'LearnedMixin':
|
outer_criterion_config = {'feat_dim': 512, 'num_classes': 9}
|
elif outer_criterion == 'RUBi':
|
outer_criterion_config = {'feat_dim': 512}
|
else:
|
outer_criterion_config = {'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y,
|
'algorithm': hsic_alg}
|
engine = ImageNetTrainer(
|
outer_criterion=outer_criterion,
|
inner_criterion=inner_criterion,
|
outer_criterion_config=outer_criterion_config,
|
outer_criterion_detail={'sigma_x_type': rbf_sigma_x,
|
'sigma_y_type': rbf_sigma_y,
|
'sigma_x_scale': rbf_sigma_scale_x,
|
'sigma_y_scale': rbf_sigma_scale_y},
|
inner_criterion_config={'sigma_x': rbf_sigma_x, 'sigma_y': rbf_sigma_y,
|
'algorithm': hsic_alg},
|
inner_criterion_detail={'sigma_x_type': rbf_sigma_x,
|
'sigma_y_type': rbf_sigma_y,
|
'sigma_x_scale': rbf_sigma_scale_x,
|
'sigma_y_scale': rbf_sigma_scale_y},
|
n_epochs=n_epochs,
|
n_f_pretrain_epochs=n_f_pretrain_epochs,
|
n_g_pretrain_epochs=n_g_pretrain_epochs,
|
f_config={'feature_pos': feature_pos,
|
'num_classes': num_classes},
|
g_config={'feature_pos': feature_pos,
|
'num_classes': num_classes},
|
optimizer=optim,
|
f_optim_config={'lr': lr, 'weight_decay': 1e-4},
|
g_optim_config={'lr': lr, 'weight_decay': 1e-4},
|
f_scheduler_config=f_scheduler_config,
|
g_scheduler_config=g_scheduler_config,
|
scheduler=scheduler,
|
f_lambda_outer=f_lambda_outer,
|
g_lambda_inner=g_lambda_inner,
|
n_g_update=n_g_update,
|
update_g_cls=update_g_cls,
|
n_g_nets=n_g_nets,
|
train_loader=tr_loader,
|
logger=logger,
|
log_step=log_step)
|
engine.train(tr_loader, val_loaders=val_loaders,
|
val_epoch_step=1,
|
update_sigma_per_epoch=update_sigma_per_epoch,
|
save_dir=save_dir)
|
# Script entry point: python-fire turns `main`'s keyword arguments into CLI
# flags (e.g. --batch_size, --n_epochs), so the training run is configured
# entirely from the command line. `fire` and `main` are defined earlier in
# this file, outside the visible chunk.
if __name__ == '__main__':
    fire.Fire(main)
|
# <FILESEP>
|
import torch
|
import torch.optim as optim
|
import numpy as np
|
from PIL import Image
|
import pano
|
def vecang(vec1, vec2):
|
vec1 = vec1 / np.sqrt((vec1 ** 2).sum())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.