data:
  dataset: LSUN
  category: church_outdoor
  image_size: 64
  channels: 3
  logit_transform: false
  uniform_dequantization: false
  gaussian_dequantization: false
  random_flip: true
  rescaled: false
  num_workers: 32
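
# Assuming the standard NCSNv2-style data pipeline: with logit_transform, uniform_dequantization,
# gaussian_dequantization and rescaled all false, images are fed to the network in [0, 1] and the
# only augmentation is a random horizontal flip. The exact transform is defined in the training
# code, so treat this as a sketch of the intent rather than a guarantee.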

model:
  sigma_begin: 140
  num_classes: 788
  ema: true
  ema_rate: 0.999
  spec_norm: false
  sigma_dist: geometric
  sigma_end: 0.01
  normalization: InstanceNorm++
  nonlinearity: elu
  ngf: 128
  unet: false
  dropout: 0.0
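
# Assuming the usual convention for sigma_dist: geometric, the num_classes = 788 noise levels
# form a geometric sequence from sigma_begin down to sigma_end, roughly
#   sigma_i = sigma_begin * (sigma_end / sigma_begin) ** (i / (num_classes - 1)),  i = 0, ..., 787
# so sigma_0 = 140 and sigma_787 = 0.01; the exact construction lives in the training code.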

sampling:
  noise_first: false
  clamp: false

experiment_name: 'logits_evolution_tracking_best_point'

training:
  log_all_sigmas: true # whether to send training information to tensorboard

optim:
  weight_decay: 0.000
  optimizer: Adam
  lr: 0.0001
  beta1: 0.9
  beta2: 0.999
  adv_beta1: -0.5
  adv_beta2: 0.9
  amsgrad: false
  eps: 0.00000001
  momentum: 0.9
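
# Reading the field names (not the code): beta1/beta2 presumably configure Adam for the score
# network, while adv_beta1/adv_beta2 look like separate Adam betas for the adversarial
# (discriminator) updates; momentum would only matter if the optimizer were switched to SGD.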

fast_fid:
  batch_size: 50
  num_samples: 1000
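
# Assuming num_samples is the total sample count, a fast FID evaluation with these settings
# would draw 1000 samples in 20 batches of 50.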

adversarial:
  lambda_dae: 1 # multiplier term for the Lp loss (only used in the GAN setting)
  lambda_D: 1 # multiplier term for the GAN loss of the discriminator
  lambda_G_gan: 1 # multiplier term for the GAN loss of the generator
  D_steps: 1 # discriminator steps per generator step
  adv_loss: LSGAN # one of: GAN, LSGAN, HingeGAN, RpGAN, RaGAN, RaLSGAN, RaHingeGAN
  arch: 2 # 0 is DCGAN_D0, 1 is DCGAN_D1, 2 is BigGAN
  spectral: false # if true, use spectral normalization in D
  no_batch_norm_D: false # not active in BigGAN
  adv_clamp: true # if true, do not clamp the output of the score network before giving it to the discriminator
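
# adv_loss: LSGAN selects a least-squares GAN objective; in its standard form (Mao et al., 2017)
# the discriminator minimizes (D(x_real) - 1)^2 + D(x_fake)^2 while the generator minimizes
# (D(x_fake) - 1)^2. The relativistic variants listed above (RpGAN, RaGAN, ...) instead score
# real samples relative to fakes. How these losses are weighted by lambda_D / lambda_G_gan is
# defined in the training code.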

biggan:
  ch: 64 # number of channels
  thin: false # if true, use thin Discriminator (D_ch = 0 with D_thin = True leads to )
  kernel_size: 3
  attn: '64' # number of attention filters (if 0, do not use self-attention)
  n_classes: 1 # number of classes in the dataset (1 leads to an unconditional GAN) # inactive
  init: xavier # type of init: ortho, xavier, N02