Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | OpenOOD-main/configs/pipelines/train/train_rd4ad.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'"
output_dir: ./results/
save_output: True
force_merge: False
merge_option: merge
mark: default
num_gpus: 1
num_workers: 0
num_machines: 1
machine_rank: 0
preprocessor:
name: base
pipeline:
name: train_ad
trainer:
name: rd4ad
evaluator:
name: ad
optimizer:
name: adam
num_epochs: 200
lr: 0.005
betas: [0.5,0.999]
recorder:
name: rd4ad
save_all_models: False
| 506 | 13.911765 | 122 | yml |
null | OpenOOD-main/configs/pipelines/train/train_regmixup.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'\
_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'\
_alpha'@{trainer.trainer_args.alpha}'_'@{mark}'/s'@{seed}'"
output_dir: ./results/
save_output: True
merge_option: default
mark: default
seed: 0
num_gpus: 1
num_workers: 8
num_machines: 1
machine_rank: 0
pipeline:
name: train
trainer:
name: regmixup
trainer_args:
alpha: 20
evaluator:
name: base
optimizer:
name: sgd
num_epochs: 100
lr: 0.1
momentum: 0.9
weight_decay: 0.0005
recorder:
name: base
save_all_models: False
| 567 | 14.777778 | 65 | yml |
null | OpenOOD-main/configs/pipelines/train/train_sem.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'\
_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'"
output_dir: ./results/
save_output: True
merge_option: default
num_gpus: 1
num_workers: 0
num_machines: 1
machine_rank: 0
pipeline:
name: finetune
network:
pretrained: True
trainer:
name: sae
trainer_args:
num_clusters: 3
feature_type: stat # flat/mean/stat
reduce_dim: pca_50 # none/capca_10/pca_50
loss_weight: [0.5, 0.5, 0.1, 0.1] # [cls_std, cls_mix, sae_id, sae_ood]
alpha: 0.5
evaluator:
name: base
optimizer:
name: sgd
num_epochs: 30
lr: 0.05
momentum: 0.9
weight_decay: 0.0005
recorder:
name: base
save_all_models: False
| 697 | 16.45 | 75 | yml |
null | OpenOOD-main/configs/pipelines/train/train_udg.yml | pipeline:
name: train_oe
trainer:
name: udg
num_clusters: 1000
pca_dim: 256
purity_ind_thresh: 0.8
purity_ood_thresh: 0.8
oe_enhance_ratio: 2.0
lambda_oe: 0.5
lambda_aux: 0.1
| 194 | 14 | 24 | yml |
null | OpenOOD-main/configs/pipelines/train/train_vos.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{trainer.name}'_e'@{optimizer.num_epochs}'_lr'@{optimizer.lr}'_'@{mark}'/s'@{seed}'"
output_dir: ./results/
save_output: True
merge_option: default # disabled if 'save_output' is False
num_classes: '@{dataset.num_classes}'
mark: default
seed: 0
num_gpus: 1
num_workers: 8
num_machines: 1
machine_rank: 0
sample_number: 1000
sample_from: 10000
select: 1
feature_dim: 512 #resnet 512, lenet 120
pipeline:
name: train
trainer:
name: vos
loss_weight: 0.1
evaluator:
name: base
optimizer:
num_epochs: 100
lr: 0.1
momentum: 0.9
weight_decay: 5.0e-4
recorder:
name: base
save_all_models: False
preprocessor:
name: base
start_epoch: 0
| 709 | 15.136364 | 133 | yml |
null | OpenOOD-main/configs/postprocessors/ash.yml | postprocessor:
name: ash
APS_mode: True
postprocessor_args:
percentile: 90
postprocessor_sweep:
percentile_list: [65, 70, 75, 80, 85, 90, 95]
| 158 | 18.875 | 49 | yml |
null | OpenOOD-main/configs/postprocessors/cider.yml | postprocessor:
name: cider
APS_mode: True
postprocessor_args:
K: 50
postprocessor_sweep:
K_list: [50, 100, 200, 500, 1000]
| 139 | 16.5 | 37 | yml |
null | OpenOOD-main/configs/postprocessors/conf_branch.yml | postprocessor:
name: conf_branch
APS_mode: False
| 53 | 12.5 | 19 | yml |
null | OpenOOD-main/configs/postprocessors/cutpaste.yml | postprocessor:
name: cutpaste
| 32 | 10 | 16 | yml |
null | OpenOOD-main/configs/postprocessors/dice.yml | postprocessor:
name: dice
APS_mode: False
postprocessor_args:
p: 90
postprocessor_sweep:
p_list: [90]
| 118 | 13.875 | 22 | yml |
null | OpenOOD-main/configs/postprocessors/draem.yml | postprocessor:
name: draem
| 29 | 9 | 14 | yml |
null | OpenOOD-main/configs/postprocessors/dropout.yml | postprocessor:
name: dropout
APS_mode: False
postprocessor_args:
dropout_p: 0.5
dropout_times: 5
| 111 | 15 | 21 | yml |
null | OpenOOD-main/configs/postprocessors/dsvdd.yml | postprocessor:
name: dsvdd
| 29 | 9 | 14 | yml |
null | OpenOOD-main/configs/postprocessors/ebo.yml | postprocessor:
name: ebo
APS_mode: True
postprocessor_args:
temperature: 1
postprocessor_sweep:
temperature_list: [1, 10, 100, 1000]
| 149 | 17.75 | 40 | yml |
null | OpenOOD-main/configs/postprocessors/ensemble.yml | postprocessor:
name: ensemble
postprocessor_args:
network_name: lenet
checkpoint_root: ./results/lenet_ensemble_pretrained
checkpoints: [net1, net2, net3, net4, net5]
num_networks: 5 # number of networks to ensembel
| 237 | 28.75 | 56 | yml |
null | OpenOOD-main/configs/postprocessors/gmm.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 50]
feature_type_list: [stat, mean, flat] # flat/mean/stat
alpha_list: [0, 0, 1]
reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50
| 530 | 34.4 | 66 | yml |
null | OpenOOD-main/configs/postprocessors/godin.yml | postprocessor:
name: godin
APS_mode: False
postprocessor_args:
score_func: h # use h or g
noise_magnitude: 0.0025 # in range of [0, 0.0025, 0.005, 0.01, 0.02, 0.04, 0.08]
| 189 | 26.142857 | 86 | yml |
null | OpenOOD-main/configs/postprocessors/gradnorm.yml | postprocessor:
name: gradnorm
APS_mode: False
postprocessor_args:
| 72 | 13.6 | 21 | yml |
null | OpenOOD-main/configs/postprocessors/gram.yml | postprocessor:
name: gram
APS_mode: True
postprocessor_args:
powers: [1,2,3,4,5]
postprocessor_sweep:
powers_list: [[1,2,3,4,5]]
| 145 | 17.25 | 30 | yml |
null | OpenOOD-main/configs/postprocessors/kdad.yml | postprocessor:
name: msp
| 29 | 9 | 14 | yml |
null | OpenOOD-main/configs/postprocessors/klm.yml | postprocessor:
name: klm
APS_mode: False
| 45 | 10.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/knn.yml | postprocessor:
name: knn
APS_mode: True
postprocessor_args:
K: 50
postprocessor_sweep:
K_list: [50, 100, 200, 500, 1000]
| 137 | 16.25 | 37 | yml |
null | OpenOOD-main/configs/postprocessors/mcd.yml | postprocessor:
name: mcd
APS_mode: False
| 45 | 10.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/mds.yml | postprocessor:
name: mds
APS_mode: False
| 45 | 10.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/mds_ensemble.yml | postprocessor:
name: mds_ensemble
APS_mode: True
postprocessor_args:
noise: 0.0014
feature_type_list: [mean] # flat/mean/stat
alpha_list: [1]
reduce_dim_list: [none] # none/capca/pca_50/lda
postprocessor_sweep:
noise_list: [0.0014]
| 266 | 23.272727 | 53 | yml |
null | OpenOOD-main/configs/postprocessors/mls.yml | postprocessor:
name: mls
APS_mode: False
postprocessor_args:
| 67 | 12.6 | 21 | yml |
null | OpenOOD-main/configs/postprocessors/mos.yml | postprocessor:
name: mos
postprocessor_args:
coreset_sampling_ratio: 0.01
| 82 | 15.6 | 32 | yml |
null | OpenOOD-main/configs/postprocessors/msp.yml | postprocessor:
name: msp
APS_mode: False
| 45 | 10.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/npos.yml | postprocessor:
name: npos
APS_mode: True
postprocessor_args:
K: 50
postprocessor_sweep:
K_list: [50, 100, 200, 500, 1000]
| 138 | 16.375 | 37 | yml |
null | OpenOOD-main/configs/postprocessors/odin.yml | postprocessor:
name: odin
APS_mode: True
postprocessor_args:
temperature: 1000
noise: 0.0014
postprocessor_sweep:
temperature: [1, 10, 100, 1000]
noise: [0.0014, 0.0028]
| 194 | 18.5 | 35 | yml |
null | OpenOOD-main/configs/postprocessors/opengan.yml | postprocessor:
name: opengan
APS_mode: False
| 53 | 12.5 | 19 | yml |
null | OpenOOD-main/configs/postprocessors/openmax.yml | postprocessor:
name: openmax
APS_mode: False
postprocessor_args:
coreset_sampling_ratio: 0.01
n_neighbors: 9
phase: test
category: test
save_src_code: True
save_anomaly_map: True
noise:
feature_type_list:
| 243 | 17.769231 | 32 | yml |
null | OpenOOD-main/configs/postprocessors/patch.yml | postprocessor:
name: patchcore
postprocessor_args:
coreset_sampling_ratio: 0.01
n_neighbors: 9
phase: test
category: hazelnut
save_src_code: True
save_anomaly_map: True
noise:
feature_type_list:
| 231 | 18.333333 | 32 | yml |
null | OpenOOD-main/configs/postprocessors/rankfeat.yml | postprocessor:
name: rankfeat
APS_mode: False
postprocessor_args:
accelerate: False
temperature: 1
| 113 | 15.285714 | 21 | yml |
null | OpenOOD-main/configs/postprocessors/rd4ad.yml | postprocessor:
name: rd4ad
APS_mode: False
| 47 | 11 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/react.yml | postprocessor:
name: react
APS_mode: True
postprocessor_args:
percentile: 90
postprocessor_sweep:
percentile_list: [85, 90, 95, 99]
| 148 | 17.625 | 37 | yml |
null | OpenOOD-main/configs/postprocessors/residual.yml | postprocessor:
name: residual
postprocessor_args:
dim: 512
| 67 | 12.6 | 21 | yml |
null | OpenOOD-main/configs/postprocessors/rmds.yml | postprocessor:
name: rel_mds
APS_mode: False
| 49 | 11.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/rot.yml | postprocessor:
name: rot
APS_mode: False
| 45 | 10.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/rotpred.yml | postprocessor:
name: rot
APS_mode: False
| 45 | 10.5 | 17 | yml |
null | OpenOOD-main/configs/postprocessors/rts.yml | postprocessor:
name: rts
APS_mode: False
postprocessor_args:
ood_score: 'var' # msp or var | 100 | 19.2 | 33 | yml |
null | OpenOOD-main/configs/postprocessors/she.yml | postprocessor:
name: she
APS_mode: False
postprocessor_args:
metric: inner_product
| 93 | 14.666667 | 25 | yml |
null | OpenOOD-main/configs/postprocessors/ssd.yml | postprocessor:
name: mds
APS_mode: True
postprocessor_args:
noise: 0.0014
feature_type_list: [mean] # flat/mean/stat
alpha_list: [1]
reduce_dim_list: [none] # none/capca/pca_50/lda
postprocessor_sweep:
noise_list: [0.0014]
| 257 | 22.454545 | 53 | yml |
null | OpenOOD-main/configs/postprocessors/temp_scaling.yml | postprocessor:
name: temperature_scaling
APS_mode: False
| 61 | 14.5 | 27 | yml |
null | OpenOOD-main/configs/postprocessors/vim.yml | postprocessor:
name: vim
APS_mode: True
postprocessor_args:
dim: 256
postprocessor_sweep:
dim_list: [256, 1000]
| 128 | 15.125 | 25 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/cifar_gmm_0.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.feature_type_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 1, 1, 10]
feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat
alpha_list: [-0.0001, 0, 0, 0, 1]
reduce_dim_list: [pca_10, none, none, none, pca_10] # none/capca_10/pca_50
| 632 | 38.5625 | 80 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/cifar_gmm_1.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.feature_type_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 1, 1, 10]
feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat
alpha_list: [-0.0001, 0, 0, 0, 1]
reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50
| 632 | 38.5625 | 80 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/cifar_gmm_2.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.feature_type_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 1, 1, 10]
feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat
alpha_list: [-0.0001, 0, 0, 0, 1]
reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50
| 632 | 38.5625 | 80 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/cifar_gmm_3.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.feature_type_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 1, 1, 10]
feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat
alpha_list: [-0.0001, 0, 0, 0, 1]
reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50
| 632 | 38.5625 | 80 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/cifar_gmm_4.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.feature_type_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 1, 1, 10]
feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat
alpha_list: [-0.0001, 0, 0, 0, 1]
reduce_dim_list: [pca_50, none, none, none, pca_50] # none/capca_10/pca_50
| 632 | 38.5625 | 80 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/covid_gmm_0.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_'@{postprocessor.name}'\
_'@{postprocessor.postprocessor_args.num_clusters_list}'\
_'@{postprocessor.postprocessor_args.feature_type_list}'\
_'@{postprocessor.postprocessor_args.alpha_list}'\
_'@{postprocessor.postprocessor_args.reduce_dim_list}'\
_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [1, 1, 1, 1, 5]
feature_type_list: [stat, mean, mean, mean, flat] # flat/mean/stat
alpha_list: [-0.001, 0, 0, 0, 1]
reduce_dim_list: [pca_10, pca_10, pca_10, pca_10, pca_10] # none/capca_10/pca_50
| 636 | 38.8125 | 86 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/mnist_gmm_0.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_gmm0_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 10]
feature_type_list: [flat, mean, flat] # flat/mean/stat
alpha_list: [1, 0, 0]
reduce_dim_list: [pca_50, none, pca_50] # none/capca_10/pca_50
| 346 | 30.545455 | 68 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/mnist_gmm_1.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_gmm1_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 10]
feature_type_list: [flat, mean, flat] # flat/mean/stat
alpha_list: [-0.001, 0, 1]
reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50
| 349 | 30.818182 | 66 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/mnist_gmm_2.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_gmm2_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 10]
feature_type_list: [flat, mean, flat] # flat/mean/stat
alpha_list: [-0.01, 0, 1]
reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50
| 348 | 30.727273 | 66 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/mnist_gmm_3.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_gmm3_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 10]
feature_type_list: [stat, mean, flat] # flat/mean/stat
alpha_list: [-0.01, 0, 1]
reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50
| 348 | 30.727273 | 66 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/mnist_gmm_4.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_gmm4_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 10]
feature_type_list: [stat, mean, flat] # flat/mean/stat
alpha_list: [-0.01, 0, 1]
reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50
| 348 | 30.727273 | 66 | yml |
null | OpenOOD-main/configs/postprocessors/_gmm_iter/mnist_gmm_5.yml | exp_name: "'@{dataset.name}'_'@{network.name}'_'@{pipeline.name}'\
_'@{evaluator.name}'_gmm0_'@{mark}'"
postprocessor:
name: gmm
postprocessor_args:
num_clusters_list: [3, 1, 10]
feature_type_list: [stat, mean, flat] # flat/mean/stat
alpha_list: [-0.01, 0, 1]
reduce_dim_list: [none, none, pca_50] # none/capca_10/pca_50
| 348 | 30.727273 | 66 | yml |
null | OpenOOD-main/configs/preprocessors/augmix_preprocessor.yml | preprocessor:
name: augmix
severity: 1 # see torchvision docs for meaning of the args
all_ops: true
mixture_width: 3
alpha: 1.0
chain_depth: -1
| 156 | 18.625 | 60 | yml |
null | OpenOOD-main/configs/preprocessors/base_preprocessor.yml | preprocessor:
name: base
| 27 | 8.333333 | 13 | yml |
null | OpenOOD-main/configs/preprocessors/csi_preprocessor.yml | preprocessor:
name: csi
| 26 | 8 | 13 | yml |
null | OpenOOD-main/configs/preprocessors/cutout_preprocessor.yml | preprocessor:
name: cutout
n_holes: 1
length: 16
| 55 | 10.2 | 14 | yml |
null | OpenOOD-main/configs/preprocessors/cutpaste_preprocessor.yml | preprocessor:
name: cutpaste
preprocessor_args:
area_ratio: [0.02, 0.15]
aspect_ratio: 0.3
| 103 | 16.333333 | 28 | yml |
null | OpenOOD-main/configs/preprocessors/draem_preprocessor.yml | preprocessor:
name: draem
preprocessor_args:
image_size: 256
anomaly_source: ./data/images_classic/texture
| 119 | 19 | 49 | yml |
null | OpenOOD-main/configs/preprocessors/patchcore_preprocessor.yml | preprocessor:
name: patchcore
| 32 | 10 | 17 | yml |
null | OpenOOD-main/configs/preprocessors/pixmix_preprocessor.yml | preprocessor:
name: pixmix
preprocessor_args:
mixing_set_dir: data/benchmark_imglist/cifar10/fractals_fvis.txt
aug_severity: 3 # severity of base augmentation operators
all_ops: true # turn on all augmentation operations (+brightness,contrast,color,sharpness
k: 4 # augment the image a random number of times with a maximum of k times (mixing iterations)
beta: 3 # severity of mixing
| 413 | 45 | 102 | yml |
null | OpenOOD-main/configs/preprocessors/randaugment_preprocessor.yml | preprocessor:
name: randaugment
n: 1
m: 14
| 49 | 9 | 19 | yml |
null | OpenOOD-main/openood/__init__.py | 0 | 0 | 0 | py | |
null | OpenOOD-main/openood/datasets/__init__.py | from .utils import get_dataloader, get_feature_dataloader, get_feature_opengan_dataloader, get_ood_dataloader
| 110 | 54.5 | 109 | py |
null | OpenOOD-main/openood/datasets/base_dataset.py | import logging
import random
import traceback
from torch.utils.data import Dataset
class BaseDataset(Dataset):
def __init__(self, pseudo_index=-1, skip_broken=False, new_index='next'):
super(BaseDataset, self).__init__()
self.pseudo_index = pseudo_index
self.skip_broken = skip_broken
self.new_index = new_index
if new_index not in ('next', 'rand'):
raise ValueError('new_index not one of ("next", "rand")')
def __getitem__(self, index):
# in some pytorch versions, input index will be torch.Tensor
index = int(index)
# if sampler produce pseudo_index,
# randomly sample an index, and mark it as pseudo
if index == self.pseudo_index:
index = random.randrange(len(self))
pseudo = 1
else:
pseudo = 0
while True:
try:
sample = self.getitem(index)
break
except Exception as e:
if self.skip_broken and not isinstance(e, NotImplementedError):
if self.new_index == 'next':
new_index = (index + 1) % len(self)
else:
new_index = random.randrange(len(self))
logging.warn(
'skip broken index [{}], use next index [{}]'.format(
index, new_index))
index = new_index
else:
logging.error('index [{}] broken'.format(index))
traceback.print_exc()
logging.error(e)
raise e
sample['index'] = index
sample['pseudo'] = pseudo
return sample
def getitem(self, index):
raise NotImplementedError
| 1,815 | 32.018182 | 79 | py |
null | OpenOOD-main/openood/datasets/feature_dataset.py | from torch.utils.data import Dataset
class FeatDataset(Dataset):
def __init__(self, feat, labels):
self.data = feat
self.labels = labels
self.len = feat.shape[0]
assert self.len == len(labels)
def __len__(self):
return self.len
def __getitem__(self, idx):
data = self.data[idx]
label = self.labels[idx]
return {'data': data, 'label': label}
| 421 | 22.444444 | 45 | py |
null | OpenOOD-main/openood/datasets/imglist_augmix_dataset.py | import ast
import io
import logging
import os
import torch
from PIL import Image, ImageFile
from .base_dataset import BaseDataset
# to fix "OSError: image file is truncated"
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Convert:
def __init__(self, mode='RGB'):
self.mode = mode
def __call__(self, image):
return image.convert(self.mode)
class ImglistAugMixDataset(BaseDataset):
def __init__(self,
name,
imglist_pth,
data_dir,
num_classes,
preprocessor,
data_aux_preprocessor,
maxlen=None,
dummy_read=False,
dummy_size=None,
**kwargs):
super(ImglistAugMixDataset, self).__init__(**kwargs)
self.name = name
with open(imglist_pth) as imgfile:
self.imglist = imgfile.readlines()
self.data_dir = data_dir
self.num_classes = num_classes
self.preprocessor = preprocessor
self.transform_image = preprocessor
self.transform_aux_image = data_aux_preprocessor
self.maxlen = maxlen
self.dummy_read = dummy_read
self.dummy_size = dummy_size
if dummy_read and dummy_size is None:
raise ValueError(
'if dummy_read is True, should provide dummy_size')
def __len__(self):
if self.maxlen is None:
return len(self.imglist)
else:
return min(len(self.imglist), self.maxlen)
def getitem(self, index):
line = self.imglist[index].strip('\n')
tokens = line.split(' ', 1)
image_name, extra_str = tokens[0], tokens[1]
if self.data_dir != '' and image_name.startswith('/'):
raise RuntimeError('image_name starts with "/"')
path = os.path.join(self.data_dir, image_name)
sample = dict()
sample['image_name'] = image_name
kwargs = {'name': self.name, 'path': path, 'tokens': tokens}
try:
# some preprocessor methods require setup
self.preprocessor.setup(**kwargs)
except:
pass
try:
if not self.dummy_read:
with open(path, 'rb') as f:
content = f.read()
filebytes = content
buff = io.BytesIO(filebytes)
if self.dummy_size is not None:
sample['data'] = torch.rand(self.dummy_size)
else:
image = Image.open(buff).convert('RGB')
orig, aug1, aug2 = self.transform_image(image)
sample['data'] = orig
sample['data_aug1'] = aug1
sample['data_aug2'] = aug2
sample['data_aux'] = self.transform_aux_image(image)
extras = ast.literal_eval(extra_str)
try:
for key, value in extras.items():
sample[key] = value
# if you use dic the code below will need ['label']
sample['label'] = 0
except AttributeError:
sample['label'] = int(extra_str)
# Generate Soft Label
soft_label = torch.Tensor(self.num_classes)
if sample['label'] < 0:
soft_label.fill_(1.0 / self.num_classes)
else:
soft_label.fill_(0)
soft_label[sample['label']] = 1
sample['soft_label'] = soft_label
except Exception as e:
logging.error('[{}] broken'.format(path))
raise e
return sample
| 3,619 | 31.909091 | 68 | py |
null | OpenOOD-main/openood/datasets/imglist_dataset.py | import ast
import io
import logging
import os
import torch
from PIL import Image, ImageFile
from .base_dataset import BaseDataset
# to fix "OSError: image file is truncated"
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Convert:
def __init__(self, mode='RGB'):
self.mode = mode
def __call__(self, image):
return image.convert(self.mode)
class ImglistDataset(BaseDataset):
def __init__(self,
name,
imglist_pth,
data_dir,
num_classes,
preprocessor,
data_aux_preprocessor,
maxlen=None,
dummy_read=False,
dummy_size=None,
**kwargs):
super(ImglistDataset, self).__init__(**kwargs)
self.name = name
with open(imglist_pth) as imgfile:
self.imglist = imgfile.readlines()
self.data_dir = data_dir
self.num_classes = num_classes
self.preprocessor = preprocessor
self.transform_image = preprocessor
self.transform_aux_image = data_aux_preprocessor
self.maxlen = maxlen
self.dummy_read = dummy_read
self.dummy_size = dummy_size
if dummy_read and dummy_size is None:
raise ValueError(
'if dummy_read is True, should provide dummy_size')
def __len__(self):
if self.maxlen is None:
return len(self.imglist)
else:
return min(len(self.imglist), self.maxlen)
def getitem(self, index):
line = self.imglist[index].strip('\n')
tokens = line.split(' ', 1)
image_name, extra_str = tokens[0], tokens[1]
if self.data_dir != '' and image_name.startswith('/'):
raise RuntimeError('image_name starts with "/"')
path = os.path.join(self.data_dir, image_name)
sample = dict()
sample['image_name'] = image_name
kwargs = {'name': self.name, 'path': path, 'tokens': tokens}
try:
# some preprocessor methods require setup
self.preprocessor.setup(**kwargs)
except:
pass
try:
if not self.dummy_read:
with open(path, 'rb') as f:
content = f.read()
filebytes = content
buff = io.BytesIO(filebytes)
if self.dummy_size is not None:
sample['data'] = torch.rand(self.dummy_size)
else:
image = Image.open(buff).convert('RGB')
sample['data'] = self.transform_image(image)
sample['data_aux'] = self.transform_aux_image(image)
extras = ast.literal_eval(extra_str)
try:
for key, value in extras.items():
sample[key] = value
# if you use dic the code below will need ['label']
sample['label'] = 0
except AttributeError:
sample['label'] = int(extra_str)
# Generate Soft Label
soft_label = torch.Tensor(self.num_classes)
if sample['label'] < 0:
soft_label.fill_(1.0 / self.num_classes)
else:
soft_label.fill_(0)
soft_label[sample['label']] = 1
sample['soft_label'] = soft_label
except Exception as e:
logging.error('[{}] broken'.format(path))
raise e
return sample
| 3,481 | 31.542056 | 68 | py |
null | OpenOOD-main/openood/datasets/imglist_extradata_dataset.py | import ast
import io
import logging
import os
import numpy as np
import torch
from PIL import Image, ImageFile
from torch.utils.data import Sampler
from .base_dataset import BaseDataset
# to fix "OSError: image file is truncated"
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Convert:
def __init__(self, mode='RGB'):
self.mode = mode
def __call__(self, image):
return image.convert(self.mode)
class ImglistExtraDataDataset(BaseDataset):
def __init__(self,
name,
imglist_pth,
data_dir,
num_classes,
preprocessor,
data_aux_preprocessor,
maxlen=None,
dummy_read=False,
dummy_size=None,
extra_data_pth=None,
extra_label_pth=None,
extra_percent=100,
**kwargs):
super(ImglistExtraDataDataset, self).__init__(**kwargs)
self.name = name
with open(imglist_pth) as imgfile:
self.imglist = imgfile.readlines()
self.data_dir = data_dir
self.num_classes = num_classes
self.preprocessor = preprocessor
self.transform_image = preprocessor
self.transform_aux_image = data_aux_preprocessor
self.maxlen = maxlen
self.dummy_read = dummy_read
self.dummy_size = dummy_size
if dummy_read and dummy_size is None:
raise ValueError(
'if dummy_read is True, should provide dummy_size')
self.orig_ids = list(range(len(self.imglist)))
assert extra_data_pth is not None
assert extra_label_pth is not None
extra_data = np.load(extra_data_pth)
extra_labels = np.load(extra_label_pth)
assert len(extra_data) == len(extra_labels)
self.extra_num = int(len(extra_labels) * extra_percent / 100.)
self.total_num = len(self.imglist) + self.extra_num
rng = np.random.RandomState(0)
indices = rng.permutation(len(extra_labels))
self.extra_data = extra_data[indices[:self.extra_num]]
self.extra_labels = extra_labels[indices[:self.extra_num]]
self.extra_ids = list(
set(range(self.total_num)) - set(range(len(self.imglist))))
def __len__(self):
return self.total_num
def getitem(self, index):
if index in self.orig_ids:
line = self.imglist[index].strip('\n')
tokens = line.split(' ', 1)
image_name, extra_str = tokens[0], tokens[1]
if self.data_dir != '' and image_name.startswith('/'):
raise RuntimeError('image_name starts with "/"')
path = os.path.join(self.data_dir, image_name)
sample = dict()
sample['image_name'] = image_name
kwargs = {'name': self.name, 'path': path, 'tokens': tokens}
# some preprocessor methods require setup
self.preprocessor.setup(**kwargs)
try:
if not self.dummy_read:
with open(path, 'rb') as f:
content = f.read()
filebytes = content
buff = io.BytesIO(filebytes)
if self.dummy_size is not None:
sample['data'] = torch.rand(self.dummy_size)
else:
image = Image.open(buff).convert('RGB')
sample['data'] = self.transform_image(image)
sample['data_aux'] = self.transform_aux_image(image)
extras = ast.literal_eval(extra_str)
try:
for key, value in extras.items():
sample[key] = value
# if you use dic the code below will need ['label']
sample['label'] = 0
except AttributeError:
sample['label'] = int(extra_str)
# Generate Soft Label
soft_label = torch.Tensor(self.num_classes)
if sample['label'] < 0:
soft_label.fill_(1.0 / self.num_classes)
else:
soft_label.fill_(0)
soft_label[sample['label']] = 1
sample['soft_label'] = soft_label
except Exception as e:
logging.error('[{}] broken'.format(path))
raise e
return sample
else:
ind = index - len(self.imglist)
image = Image.fromarray(self.extra_data[ind])
sample = dict()
sample['image_name'] = str(ind) # dummy name
sample['data'] = self.transform_image(image)
sample['data_aux'] = self.transform_aux_image(image)
sample['label'] = self.extra_labels[ind]
# Generate Soft Label
soft_label = torch.Tensor(self.num_classes)
if sample['label'] < 0:
soft_label.fill_(1.0 / self.num_classes)
else:
soft_label.fill_(0)
soft_label[sample['label']] = 1
sample['soft_label'] = soft_label
return sample
class TwoSourceSampler(Sampler):
def __init__(self, real_inds, syn_inds, batch_size, real_ratio=0.5):
assert len(real_inds) == 50000
self.real_inds = real_inds
self.syn_inds = syn_inds
self.batch_size = batch_size
self.real_batch_size = int(self.batch_size * real_ratio)
self.syn_batch_size = self.batch_size - self.real_batch_size
if real_ratio == 0:
assert self.real_batch_size == 0
elif real_ratio == 1:
assert self.syn_batch_size == 0
self.num_batches = int(np.ceil(len(self.real_inds) / self.batch_size))
super().__init__(None)
def __iter__(self):
batch_counter = 0
real_inds_shuffled = [
self.real_inds[i] for i in torch.randperm(len(self.real_inds))
]
syn_inds_shuffled = [
self.syn_inds[i] for i in torch.randperm(len(self.syn_inds))
]
real_offset = 0
syn_offset = 0
while batch_counter < self.num_batches:
real_batch = real_inds_shuffled[
real_offset:min(real_offset +
self.real_batch_size, len(real_inds_shuffled))]
real_offset += self.real_batch_size
syn_batch = syn_inds_shuffled[
syn_offset:min(syn_offset +
self.syn_batch_size, len(syn_inds_shuffled))]
syn_offset += self.syn_batch_size
batch = real_batch + syn_batch
np.random.shuffle(batch)
yield batch
batch_counter += 1
def __len__(self):
return self.num_batches
| 6,836 | 34.795812 | 79 | py |
null | OpenOOD-main/openood/datasets/udg_dataset.py | import ast
import io
import logging
import os
import numpy as np
import torch
from PIL import Image, ImageFile
from .imglist_dataset import ImglistDataset
# to fix "OSError: image file is truncated"
ImageFile.LOAD_TRUNCATED_IMAGES = True
class UDGDataset(ImglistDataset):
    """Image-list dataset for the UDG (unsupervised dual grouping) trainer.

    Extends ImglistDataset with per-sample bookkeeping arrays that are
    updated externally during training:
      - cluster_id / cluster_reweight: deep-clustering assignment and its
        sampling weight (one entry per image),
      - pseudo_label: clustering-derived label for unlabeled samples,
        initialized to -1 meaning "no pseudo label yet",
      - ood_conf: per-sample OOD confidence weight.
    """
    def __init__(self,
                 name,
                 imglist_pth,
                 data_dir,
                 num_classes,
                 preprocessor,
                 data_aux_preprocessor,
                 maxlen=None,
                 dummy_read=False,
                 dummy_size=None,
                 **kwargs):
        super(UDGDataset,
              self).__init__(name, imglist_pth, data_dir, num_classes,
                             preprocessor, data_aux_preprocessor, maxlen,
                             dummy_read, dummy_size, **kwargs)

        # one entry per image in the imglist; mutated by the trainer
        self.cluster_id = np.zeros(len(self.imglist), dtype=int)
        self.cluster_reweight = np.ones(len(self.imglist), dtype=float)

        # use pseudo labels for unlabeled dataset during training
        self.pseudo_label = np.array(-1 * np.ones(len(self.imglist)),
                                     dtype=int)
        self.ood_conf = np.ones(len(self.imglist), dtype=float)

    def getitem(self, index):
        """Load one sample dict with image tensors, (soft) labels and the
        clustering / pseudo-label side information for `index`.

        Each imglist line is '<image path> <extra>', where <extra> is
        either a bare integer label or a dict literal of per-sample
        fields (expected to include 'label').
        """
        line = self.imglist[index].strip('\n')
        tokens = line.split(' ', 1)
        image_name, extra_str = tokens[0], tokens[1]
        # absolute image paths are only allowed when no data_dir is set
        if self.data_dir != '' and image_name.startswith('/'):
            raise RuntimeError('root not empty but image_name starts with "/"')
        path = os.path.join(self.data_dir, image_name)
        sample = dict()
        sample['image_name'] = image_name
        try:
            if not self.dummy_read:
                with open(path, 'rb') as f:
                    content = f.read()
                filebytes = content
                buff = io.BytesIO(filebytes)
            if self.dummy_size is not None:
                # dummy mode: random tensor instead of the decoded image
                sample['data'] = torch.rand(self.dummy_size)
            else:
                image = Image.open(buff).convert('RGB')
                sample['data'] = self.transform_image(image)
                sample['data_aux'] = self.transform_aux_image(image)
            # dict literal -> copy all fields; plain int -> it is the label
            extras = ast.literal_eval(extra_str)
            try:
                for key, value in extras.items():
                    sample[key] = value
            except AttributeError:
                sample['label'] = int(extra_str)
            # Generate Soft Label
            soft_label = torch.Tensor(self.num_classes)
            if sample['label'] < 0:
                # negative label marks unlabeled data -> uniform soft label
                soft_label.fill_(1.0 / self.num_classes)
            else:
                soft_label.fill_(0)
                soft_label[sample['label']] = 1
            sample['soft_label'] = soft_label

            # Deep Clustering Aux Label Assignment for
            # both labeled/unlabeled data
            sample['cluster_id'] = self.cluster_id[index]
            sample['cluster_reweight'] = self.cluster_reweight[index]

            # Deep Clustering Pseudo Label Assignment for unlabeled data
            sample['pseudo_label'] = self.pseudo_label[index]
            soft_pseudo_label = torch.Tensor(len(sample['soft_label']))
            if sample['pseudo_label'] == -1:
                # no pseudo label assigned yet -> uniform distribution
                soft_pseudo_label.fill_(1.0 / len(sample['soft_label']))
            else:
                soft_pseudo_label.fill_(0.0)
                soft_pseudo_label[sample['pseudo_label']] = 1.0
            sample['pseudo_softlabel'] = soft_pseudo_label
            sample['ood_conf'] = self.ood_conf[index]

        except Exception as e:
            logging.error('[{}] broken'.format(path))
            raise e
        return sample
| 3,672 | 37.260417 | 79 | py |
null | OpenOOD-main/openood/datasets/utils.py | import os
import torch
from numpy import load
from torch.utils.data import DataLoader
from openood.preprocessors.test_preprocessor import TestStandardPreProcessor
from openood.preprocessors.utils import get_preprocessor
from openood.utils.config import Config
from .feature_dataset import FeatDataset
from .imglist_dataset import ImglistDataset
from .imglist_augmix_dataset import ImglistAugMixDataset
from .imglist_extradata_dataset import ImglistExtraDataDataset, TwoSourceSampler
from .udg_dataset import UDGDataset
def _build_loader(dataset, dataset_config, split_config):
    """Build a DataLoader, switching to a DistributedSampler (and
    disabling in-loader shuffling) when multiple GPUs/machines are used."""
    sampler = None
    if dataset_config.num_gpus * dataset_config.num_machines > 1:
        sampler = torch.utils.data.distributed.DistributedSampler(dataset)
        split_config.shuffle = False

    return DataLoader(dataset,
                      batch_size=split_config.batch_size,
                      shuffle=split_config.shuffle,
                      num_workers=dataset_config.num_workers,
                      sampler=sampler)


def get_dataloader(config: Config):
    """Build one DataLoader per dataset split listed in the config.

    Returns:
        dict mapping split name (e.g. 'train', 'val', 'test') to its
        DataLoader.
    """
    dataset_config = config.dataset
    dataloader_dict = {}
    for split in dataset_config.split_names:
        split_config = dataset_config[split]
        preprocessor = get_preprocessor(config, split)
        # weak augmentation for data_aux
        data_aux_preprocessor = TestStandardPreProcessor(config)

        if split_config.dataset_class == 'ImglistExtraDataDataset':
            dataset = ImglistExtraDataDataset(
                name=dataset_config.name + '_' + split,
                imglist_pth=split_config.imglist_pth,
                data_dir=split_config.data_dir,
                num_classes=dataset_config.num_classes,
                preprocessor=preprocessor,
                data_aux_preprocessor=data_aux_preprocessor,
                extra_data_pth=split_config.extra_data_pth,
                extra_label_pth=split_config.extra_label_pth,
                extra_percent=split_config.extra_percent)

            # this dataset needs a custom batch sampler that mixes the
            # original and extra data at a fixed ratio within each batch
            batch_sampler = TwoSourceSampler(dataset.orig_ids,
                                             dataset.extra_ids,
                                             split_config.batch_size,
                                             split_config.orig_ratio)
            dataloader = DataLoader(
                dataset,
                batch_sampler=batch_sampler,
                num_workers=dataset_config.num_workers,
            )
        else:
            if split_config.dataset_class == 'ImglistAugMixDataset':
                dataset_class = ImglistAugMixDataset
            else:
                # NOTE: eval() resolves the class name from the config.
                # Config files are trusted input here, but avoid feeding
                # untrusted strings into this field.
                dataset_class = eval(split_config.dataset_class)
            dataset = dataset_class(
                name=dataset_config.name + '_' + split,
                imglist_pth=split_config.imglist_pth,
                data_dir=split_config.data_dir,
                num_classes=dataset_config.num_classes,
                preprocessor=preprocessor,
                data_aux_preprocessor=data_aux_preprocessor)
            dataloader = _build_loader(dataset, dataset_config, split_config)

        dataloader_dict[split] = dataloader

    return dataloader_dict
def get_ood_dataloader(config: Config):
    """Build dataloaders for OOD evaluation: one 'val' loader plus one
    loader per dataset under every other split (e.g. csid/nearood/farood)."""
    ood_config = config.ood_dataset
    # resolve the dataset class named in the config
    CustomDataset = eval(ood_config.dataset_class)

    dataloader_dict = {}
    for split in ood_config.split_names:
        split_config = ood_config[split]
        preprocessor = get_preprocessor(config, split)
        data_aux_preprocessor = TestStandardPreProcessor(config)

        def _make_loader(imglist_pth, data_dir):
            # all OOD loaders share batch size / shuffle / workers settings
            ds = CustomDataset(name=ood_config.name + '_' + split,
                               imglist_pth=imglist_pth,
                               data_dir=data_dir,
                               num_classes=ood_config.num_classes,
                               preprocessor=preprocessor,
                               data_aux_preprocessor=data_aux_preprocessor)
            return DataLoader(ds,
                              batch_size=ood_config.batch_size,
                              shuffle=ood_config.shuffle,
                              num_workers=ood_config.num_workers)

        if split == 'val':
            # single validation loader
            dataloader_dict[split] = _make_loader(split_config.imglist_pth,
                                                  split_config.data_dir)
        else:
            # one loader per dataset listed under this split
            sub_dataloader_dict = {}
            for dataset_name in split_config.datasets:
                dataset_config = split_config[dataset_name]
                sub_dataloader_dict[dataset_name] = _make_loader(
                    dataset_config.imglist_pth, dataset_config.data_dir)
            dataloader_dict[split] = sub_dataloader_dict

    return dataloader_dict
def get_feature_dataloader(dataset_config: Config):
    """Load cached features from disk and wrap them in a DataLoader."""
    # read the cached feature array from the .npz file
    cached = load(dataset_config.feat_path, allow_pickle=True)
    feats = torch.from_numpy(cached['feat_list'])
    del cached

    # append two singleton spatial dims so the vectors look like
    # [total_num, channel_size, 1, 1] feature maps to the network
    feats.unsqueeze_(-1).unsqueeze_(-1)
    print('Loaded feature size: {}'.format(feats.shape))

    split_config = dataset_config['train']
    return DataLoader(FeatDataset(feat=feats),
                      batch_size=split_config.batch_size,
                      shuffle=split_config.shuffle,
                      num_workers=dataset_config.num_workers)
def get_feature_opengan_dataloader(dataset_config: Config):
    """Build feature dataloaders for OpenGAN training.

    Expects ``<feat_root>/{id_train,id_val,ood_val}.npz`` files, each
    containing 'feat_list' and 'label_list' arrays.
    """
    feat_root = dataset_config.feat_root

    dataloader_dict = {}
    for tag in ('id_train', 'id_val', 'ood_val'):
        # read the cached feature/label arrays for this split
        cached = load(os.path.join(feat_root, f'{tag}.npz'),
                      allow_pickle=True)
        feats = torch.from_numpy(cached['feat_list'])
        labels = cached['label_list']
        del cached

        # reshape to [total_num, channel_size, 1, 1] for the network
        feats.unsqueeze_(-1).unsqueeze_(-1)
        print('Loaded feature size: {}'.format(feats.shape))

        # training settings apply to id_train; validation settings to the rest
        if tag == 'id_train':
            split_config = dataset_config['train']
        else:
            split_config = dataset_config['val']

        dataloader_dict[tag] = DataLoader(
            FeatDataset(feat=feats, labels=labels),
            batch_size=split_config.batch_size,
            shuffle=split_config.shuffle,
            num_workers=dataset_config.num_workers)

    return dataloader_dict
| 8,240 | 42.373684 | 80 | py |
null | OpenOOD-main/openood/evaluation_api/__init__.py | from .evaluator import Evaluator
| 33 | 16 | 32 | py |
null | OpenOOD-main/openood/evaluation_api/datasets.py | import os
import gdown
import zipfile
from torch.utils.data import DataLoader
import torchvision as tvs
if tvs.__version__ >= '0.13':
tvs_new = True
else:
tvs_new = False
from openood.datasets.imglist_dataset import ImglistDataset
from openood.preprocessors import BasePreprocessor
from .preprocessor import get_default_preprocessor, ImageNetCPreProcessor
# Static description of every supported ID benchmark: number of classes
# plus the (data_dir, imglist_path) pairs for its ID splits, its
# covariate-shifted ID ('csid') sets, and its near/far OOD sets.
# All paths are relative to the user-supplied data root.
DATA_INFO = {
    'cifar10': {
        'num_classes': 10,
        'id': {
            'train': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar10/train_cifar10.txt'
            },
            'val': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar10/val_cifar10.txt'
            },
            'test': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar10/test_cifar10.txt'
            }
        },
        'csid': {
            'datasets': ['cifar10c'],
            'cinic10': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar10/val_cinic10.txt'
            },
            'cifar10c': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar10/test_cifar10c.txt'
            }
        },
        'ood': {
            'val': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar10/val_tin.txt'
            },
            'near': {
                'datasets': ['cifar100', 'tin'],
                'cifar100': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/cifar10/test_cifar100.txt'
                },
                'tin': {
                    'data_dir': 'images_classic/',
                    'imglist_path': 'benchmark_imglist/cifar10/test_tin.txt'
                }
            },
            'far': {
                'datasets': ['mnist', 'svhn', 'texture', 'places365'],
                'mnist': {
                    'data_dir': 'images_classic/',
                    'imglist_path': 'benchmark_imglist/cifar10/test_mnist.txt'
                },
                'svhn': {
                    'data_dir': 'images_classic/',
                    'imglist_path': 'benchmark_imglist/cifar10/test_svhn.txt'
                },
                'texture': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/cifar10/test_texture.txt'
                },
                'places365': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/cifar10/test_places365.txt'
                },
            }
        }
    },
    'cifar100': {
        'num_classes': 100,
        'id': {
            'train': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar100/train_cifar100.txt'
            },
            'val': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar100/val_cifar100.txt'
            },
            'test': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar100/test_cifar100.txt'
            }
        },
        'csid': {
            'datasets': [],
        },
        'ood': {
            'val': {
                'data_dir': 'images_classic/',
                'imglist_path': 'benchmark_imglist/cifar100/val_tin.txt'
            },
            'near': {
                'datasets': ['cifar10', 'tin'],
                'cifar10': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/cifar100/test_cifar10.txt'
                },
                'tin': {
                    'data_dir': 'images_classic/',
                    'imglist_path': 'benchmark_imglist/cifar100/test_tin.txt'
                }
            },
            'far': {
                'datasets': ['mnist', 'svhn', 'texture', 'places365'],
                'mnist': {
                    'data_dir': 'images_classic/',
                    'imglist_path': 'benchmark_imglist/cifar100/test_mnist.txt'
                },
                'svhn': {
                    'data_dir': 'images_classic/',
                    'imglist_path': 'benchmark_imglist/cifar100/test_svhn.txt'
                },
                'texture': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/cifar100/test_texture.txt'
                },
                'places365': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/cifar100/test_places365.txt'
                }
            },
        }
    },
    'imagenet200': {
        'num_classes': 200,
        'id': {
            'train': {
                'data_dir':
                'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/train_imagenet200.txt'
            },
            'val': {
                'data_dir': 'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/val_imagenet200.txt'
            },
            'test': {
                'data_dir':
                'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/test_imagenet200.txt'
            }
        },
        'csid': {
            'datasets': ['imagenet_v2', 'imagenet_c', 'imagenet_r'],
            'imagenet_v2': {
                'data_dir':
                'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/test_imagenet200_v2.txt'
            },
            'imagenet_c': {
                'data_dir':
                'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/test_imagenet200_c.txt'
            },
            'imagenet_r': {
                'data_dir':
                'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/test_imagenet200_r.txt'
            },
        },
        'ood': {
            'val': {
                'data_dir': 'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet200/val_openimage_o.txt'
            },
            'near': {
                'datasets': ['ssb_hard', 'ninco'],
                'ssb_hard': {
                    'data_dir':
                    'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet200/test_ssb_hard.txt'
                },
                'ninco': {
                    'data_dir': 'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet200/test_ninco.txt'
                }
            },
            'far': {
                'datasets': ['inaturalist', 'textures', 'openimage_o'],
                'inaturalist': {
                    'data_dir':
                    'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet200/test_inaturalist.txt'
                },
                'textures': {
                    'data_dir':
                    'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/imagenet200/test_textures.txt'
                },
                'openimage_o': {
                    'data_dir':
                    'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet200/test_openimage_o.txt'
                },
            },
        }
    },
    'imagenet': {
        'num_classes': 1000,
        'id': {
            'train': {
                'data_dir': 'images_largescale/',
                'imglist_path': 'benchmark_imglist/imagenet/train_imagenet.txt'
            },
            'val': {
                'data_dir': 'images_largescale/',
                'imglist_path': 'benchmark_imglist/imagenet/val_imagenet.txt'
            },
            'test': {
                'data_dir': 'images_largescale/',
                'imglist_path': 'benchmark_imglist/imagenet/test_imagenet.txt'
            }
        },
        'csid': {
            'datasets': ['imagenet_v2', 'imagenet_c', 'imagenet_r'],
            'imagenet_v2': {
                'data_dir': 'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet/test_imagenet_v2.txt'
            },
            'imagenet_c': {
                'data_dir': 'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet/test_imagenet_c.txt'
            },
            'imagenet_r': {
                'data_dir': 'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet/test_imagenet_r.txt'
            },
        },
        'ood': {
            'val': {
                'data_dir': 'images_largescale/',
                'imglist_path':
                'benchmark_imglist/imagenet/val_openimage_o.txt'
            },
            'near': {
                'datasets': ['ssb_hard', 'ninco'],
                'ssb_hard': {
                    'data_dir': 'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet/test_ssb_hard.txt'
                },
                'ninco': {
                    'data_dir': 'images_largescale/',
                    'imglist_path': 'benchmark_imglist/imagenet/test_ninco.txt'
                }
            },
            'far': {
                'datasets': ['inaturalist', 'textures', 'openimage_o'],
                'inaturalist': {
                    'data_dir':
                    'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet/test_inaturalist.txt'
                },
                'textures': {
                    'data_dir': 'images_classic/',
                    'imglist_path':
                    'benchmark_imglist/imagenet/test_textures.txt'
                },
                'openimage_o': {
                    'data_dir':
                    'images_largescale/',
                    'imglist_path':
                    'benchmark_imglist/imagenet/test_openimage_o.txt'
                },
            },
        }
    },
}
# Google Drive file ids consumed by gdown for each downloadable archive.
download_id_dict = {
    'cifar10': '1Co32RiiWe16lTaiOU6JMMnyUYS41IlO1',
    'cifar100': '1PGKheHUsf29leJPPGuXqzLBMwl8qMF8_',
    'tin': '1PZ-ixyx52U989IKsMA2OT-24fToTrelC',
    'mnist': '1CCHAGWqA1KJTFFswuF9cbhmB-j98Y1Sb',
    'svhn': '1DQfc11HOtB1nEwqS4pWUFp8vtQ3DczvI',
    'texture': '1OSz1m3hHfVWbRdmMwKbUzoU8Hg9UKcam',
    'places365': '1Ec-LRSTf6u5vEctKX9vRp9OA6tqnJ0Ay',
    'imagenet_1k': '1i1ipLDFARR-JZ9argXd2-0a6DXwVhXEj',
    'species_sub': '1-JCxDx__iFMExkYRMylnGJYTPvyuX6aq',
    'ssb_hard': '1PzkA-WGG8Z18h0ooL_pDdz9cO-DCIouE',
    'ninco': '1Z82cmvIB0eghTehxOGP5VTdLt7OD3nk6',
    'inaturalist': '1zfLfMvoUD0CUlKNnkk7LgxZZBnTBipdj',
    'places': '1fZ8TbPC4JGqUCm-VtvrmkYxqRNp2PoB3',
    'sun': '1ISK0STxWzWmg-_uUr4RQ8GSLFW7TZiKp',
    'openimage_o': '1VUFXnB_z70uHfdgJG2E_pjYOcEgqM7tE',
    'imagenet_v2': '1akg2IiE22HcbvTBpwXQoD7tgfPCdkoho',
    'imagenet_r': '1EzjMN2gq-bVV7lg-MEAdeuBuz-7jbGYU',
    'imagenet_c': '1JeXL9YH4BO8gCJ631c5BHbaSsl-lekHt',
    'benchmark_imglist': '1XKzBdWCqg3vPoj-D32YixJyJJ0hL63gP'
}

# Maps each storage subfolder (under the data root) to the datasets that
# live inside it; used to decide where a downloaded archive is unpacked.
dir_dict = {
    'images_classic/': [
        'cifar100', 'tin', 'tin597', 'svhn', 'cinic10', 'imagenet10', 'mnist',
        'fashionmnist', 'cifar10', 'cifar100c', 'places365', 'cifar10c',
        'fractals_and_fvis', 'usps', 'texture', 'notmnist'
    ],
    'images_largescale/': [
        'imagenet_1k',
        'ssb_hard',
        'ninco',
        'inaturalist',
        'places',
        'sun',
        'openimage_o',
        'imagenet_v2',
        'imagenet_c',
        'imagenet_r',
    ],
    'images_medical/': ['actmed', 'bimcv', 'ct', 'hannover', 'xraybone'],
}

# Datasets (ID + CSID + OOD) that must be present locally for each
# supported ID benchmark; consumed by data_setup().
benchmarks_dict = {
    'cifar10':
    ['cifar10', 'cifar100', 'tin', 'mnist', 'svhn', 'texture', 'places365'],
    'cifar100':
    ['cifar100', 'cifar10', 'tin', 'mnist', 'svhn', 'texture', 'places365'],
    'imagenet200': [
        'imagenet_1k', 'ssb_hard', 'ninco', 'inaturalist', 'texture',
        'openimage_o', 'imagenet_v2', 'imagenet_c', 'imagenet_r'
    ],
    'imagenet': [
        'imagenet_1k', 'ssb_hard', 'ninco', 'inaturalist', 'texture',
        'openimage_o', 'imagenet_v2', 'imagenet_c', 'imagenet_r'
    ],
}
def require_download(filename, path):
    """Return True if dataset `filename` still needs downloading into `path`.

    The dataset counts as present when any entry inside `path` shares a
    name prefix with `filename`, or — for a non-empty directory — when
    `path` itself ends with `filename`. An empty directory always needs
    a download, even if `path` ends with `filename` (callers create the
    directory right before calling this).
    """
    # NOTE: rewritten from a for/else with no `break`, where the else
    # branch always ran when no entry matched; behavior is unchanged.
    for item in os.listdir(path):
        if (item.startswith(filename) or filename.startswith(item)
                or path.endswith(filename)):
            return False

    print(filename + ' needs download:')
    return True
def download_dataset(dataset, data_root):
    """Download and unpack one dataset archive into its category folder
    under `data_root`, skipping the download when it is already present."""
    # locate the storage subfolder this dataset belongs to
    store_path = None
    for category, names in dir_dict.items():
        if dataset in names:
            store_path = os.path.join(data_root, category, dataset)
            if not os.path.exists(store_path):
                os.makedirs(store_path)
            break

    if store_path is None:
        print('Invalid dataset detected {}'.format(dataset))
        return

    if require_download(dataset, store_path):
        print(store_path)
        if not store_path.endswith('/'):
            store_path = store_path + '/'
        gdown.download(id=download_id_dict[dataset], output=store_path)

        # unpack the fetched archive in place, then discard it
        file_path = os.path.join(store_path, dataset + '.zip')
        with zipfile.ZipFile(file_path, 'r') as zip_file:
            zip_file.extractall(store_path)
        os.remove(file_path)
def data_setup(data_root, id_data_name):
    """Ensure the benchmark image lists and every dataset required by the
    `id_data_name` benchmark are available under `data_root`."""
    if not data_root.endswith('/'):
        data_root = data_root + '/'

    # fetch and unpack the benchmark imglist bundle once
    if not os.path.exists(os.path.join(data_root, 'benchmark_imglist')):
        gdown.download(id=download_id_dict['benchmark_imglist'],
                       output=data_root)
        archive = os.path.join(data_root, 'benchmark_imglist.zip')
        with zipfile.ZipFile(archive, 'r') as zip_file:
            zip_file.extractall(data_root)
        os.remove(archive)

    for dataset in benchmarks_dict[id_data_name]:
        download_dataset(dataset, data_root)
def get_id_ood_dataloader(id_name, data_root, preprocessor, **loader_kwargs):
    """Build the full loader hierarchy for one benchmark.

    Returns a dict with keys:
      - 'id':   {'train'/'val'/'test': DataLoader}
      - 'csid': {dataset_name: DataLoader}  (covariate-shifted ID sets)
      - 'ood':  {'val': DataLoader, 'near'/'far': {dataset_name: DataLoader}}
    """
    if 'imagenet' in id_name:
        # ImageNet-C images are stored pre-corrupted, so they get a
        # normalize-only preprocessor; recover mean/std from whichever
        # preprocessor type was passed in (torchvision preset, Compose,
        # or OpenOOD BasePreprocessor, depending on the tvs version)
        if tvs_new:
            if isinstance(preprocessor,
                          tvs.transforms._presets.ImageClassification):
                mean, std = preprocessor.mean, preprocessor.std
            elif isinstance(preprocessor, tvs.transforms.Compose):
                temp = preprocessor.transforms[-1]
                mean, std = temp.mean, temp.std
            elif isinstance(preprocessor, BasePreprocessor):
                temp = preprocessor.transform.transforms[-1]
                mean, std = temp.mean, temp.std
            else:
                raise TypeError
        else:
            if isinstance(preprocessor, tvs.transforms.Compose):
                temp = preprocessor.transforms[-1]
                mean, std = temp.mean, temp.std
            elif isinstance(preprocessor, BasePreprocessor):
                temp = preprocessor.transform.transforms[-1]
                mean, std = temp.mean, temp.std
            else:
                raise TypeError
        imagenet_c_preprocessor = ImageNetCPreProcessor(mean, std)

    # weak augmentation for data_aux
    test_standard_preprocessor = get_default_preprocessor(id_name)

    dataloader_dict = {}
    data_info = DATA_INFO[id_name]

    # id
    sub_dataloader_dict = {}
    for split in data_info['id'].keys():
        dataset = ImglistDataset(
            name='_'.join((id_name, split)),
            imglist_pth=os.path.join(data_root,
                                     data_info['id'][split]['imglist_path']),
            data_dir=os.path.join(data_root,
                                  data_info['id'][split]['data_dir']),
            num_classes=data_info['num_classes'],
            preprocessor=preprocessor,
            data_aux_preprocessor=test_standard_preprocessor)
        dataloader = DataLoader(dataset, **loader_kwargs)
        sub_dataloader_dict[split] = dataloader
    dataloader_dict['id'] = sub_dataloader_dict

    # csid
    sub_dataloader_dict = {}
    for dataset_name in data_info['csid']['datasets']:
        dataset = ImglistDataset(
            name='_'.join((id_name, 'csid', dataset_name)),
            imglist_pth=os.path.join(
                data_root, data_info['csid'][dataset_name]['imglist_path']),
            data_dir=os.path.join(data_root,
                                  data_info['csid'][dataset_name]['data_dir']),
            num_classes=data_info['num_classes'],
            # imagenet_c images are already corrupted -> normalize only
            preprocessor=preprocessor
            if dataset_name != 'imagenet_c' else imagenet_c_preprocessor,
            data_aux_preprocessor=test_standard_preprocessor)
        dataloader = DataLoader(dataset, **loader_kwargs)
        sub_dataloader_dict[dataset_name] = dataloader
    dataloader_dict['csid'] = sub_dataloader_dict

    # ood
    dataloader_dict['ood'] = {}
    for split in data_info['ood'].keys():
        split_config = data_info['ood'][split]
        if split == 'val':
            # validation set
            dataset = ImglistDataset(
                name='_'.join((id_name, 'ood', split)),
                imglist_pth=os.path.join(data_root,
                                         split_config['imglist_path']),
                data_dir=os.path.join(data_root, split_config['data_dir']),
                num_classes=data_info['num_classes'],
                preprocessor=preprocessor,
                data_aux_preprocessor=test_standard_preprocessor)
            dataloader = DataLoader(dataset, **loader_kwargs)
            dataloader_dict['ood'][split] = dataloader
        else:
            # dataloaders for nearood, farood
            sub_dataloader_dict = {}
            for dataset_name in split_config['datasets']:
                dataset_config = split_config[dataset_name]
                dataset = ImglistDataset(
                    name='_'.join((id_name, 'ood', dataset_name)),
                    imglist_pth=os.path.join(data_root,
                                             dataset_config['imglist_path']),
                    data_dir=os.path.join(data_root,
                                          dataset_config['data_dir']),
                    num_classes=data_info['num_classes'],
                    preprocessor=preprocessor,
                    data_aux_preprocessor=test_standard_preprocessor)
                dataloader = DataLoader(dataset, **loader_kwargs)
                sub_dataloader_dict[dataset_name] = dataloader
            dataloader_dict['ood'][split] = sub_dataloader_dict

    return dataloader_dict
| 19,019 | 36.003891 | 79 | py |
null | OpenOOD-main/openood/evaluation_api/evaluator.py | from typing import Callable, List, Type
import os
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.evaluators.metrics import compute_all_metrics
from openood.postprocessors import BasePostprocessor
from openood.networks.ash_net import ASHNet
from openood.networks.react_net import ReactNet
from .datasets import DATA_INFO, data_setup, get_id_ood_dataloader
from .postprocessor import get_postprocessor
from .preprocessor import get_default_preprocessor
class Evaluator:
    def __init__(
        self,
        net: nn.Module,
        id_name: str,
        data_root: str = './data',
        config_root: str = './configs',
        preprocessor: Callable = None,
        postprocessor_name: str = None,
        postprocessor: Type[BasePostprocessor] = None,
        batch_size: int = 200,
        shuffle: bool = False,
        num_workers: int = 4,
    ) -> None:
        """A unified, easy-to-use API for evaluating (most) discriminative OOD
        detection methods.

        Args:
            net (nn.Module):
                The base classifier.
            id_name (str):
                The name of the in-distribution dataset.
            data_root (str, optional):
                The path of the data folder. Defaults to './data'.
            config_root (str, optional):
                The path of the config folder. Defaults to './configs'.
            preprocessor (Callable, optional):
                The preprocessor of input images.
                Passing None will use the default preprocessor
                following convention. Defaults to None.
            postprocessor_name (str, optional):
                The name of the postprocessor that obtains OOD score.
                Ignored if an actual postprocessor is passed.
                Defaults to None.
            postprocessor (Type[BasePostprocessor], optional):
                An actual postprocessor instance which inherits
                OpenOOD's BasePostprocessor. Defaults to None.
            batch_size (int, optional):
                The batch size of samples. Defaults to 200.
            shuffle (bool, optional):
                Whether shuffling samples. Defaults to False.
            num_workers (int, optional):
                The num_workers argument that will be passed to
                data loaders. Defaults to 4.

        Raises:
            ValueError:
                If both postprocessor_name and postprocessor are None.
            ValueError:
                If the specified ID dataset {id_name} is not supported.
            TypeError:
                If the passed postprocessor does not inherit BasePostprocessor.
        """
        # check the arguments
        if postprocessor_name is None and postprocessor is None:
            raise ValueError('Please pass postprocessor_name or postprocessor')
        if postprocessor_name is not None and postprocessor is not None:
            print(
                'Postprocessor_name is ignored because postprocessor is passed'
            )
        if id_name not in DATA_INFO:
            raise ValueError(f'Dataset [{id_name}] is not supported')

        # get data preprocessor
        if preprocessor is None:
            preprocessor = get_default_preprocessor(id_name)

        # set up config root
        if config_root is None:
            # default to <repo>/configs relative to this file's location
            filepath = os.path.dirname(os.path.abspath(__file__))
            config_root = os.path.join(*filepath.split('/')[:-2], 'configs')

        # get postprocessor
        if postprocessor is None:
            postprocessor = get_postprocessor(config_root, postprocessor_name,
                                              id_name)
        if not isinstance(postprocessor, BasePostprocessor):
            raise TypeError(
                'postprocessor should inherit BasePostprocessor in OpenOOD')

        # load data
        data_setup(data_root, id_name)
        loader_kwargs = {
            'batch_size': batch_size,
            'shuffle': shuffle,
            'num_workers': num_workers
        }
        dataloader_dict = get_id_ood_dataloader(id_name, data_root,
                                                preprocessor, **loader_kwargs)

        # wrap base model to work with certain postprocessors
        if postprocessor_name == 'react':
            net = ReactNet(net)
        elif postprocessor_name == 'ash':
            net = ASHNet(net)

        # postprocessor setup
        postprocessor.setup(net, dataloader_dict['id'], dataloader_dict['ood'])

        self.id_name = id_name
        self.net = net
        self.preprocessor = preprocessor
        self.postprocessor = postprocessor
        self.dataloader_dict = dataloader_dict
        # metric results are computed lazily and cached here
        self.metrics = {
            'id_acc': None,
            'csid_acc': None,
            'ood': None,
            'fsood': None
        }
        # raw inference outputs (pred/conf/gt) are cached here so repeated
        # evaluations never re-run the network on the same split
        self.scores = {
            'id': {
                'train': None,
                'val': None,
                'test': None
            },
            'csid': {k: None
                     for k in dataloader_dict['csid'].keys()},
            'ood': {
                'val': None,
                'near':
                {k: None
                 for k in dataloader_dict['ood']['near'].keys()},
                'far': {k: None
                        for k in dataloader_dict['ood']['far'].keys()},
            },
            'id_preds': None,
            'id_labels': None,
            'csid_preds': {k: None
                           for k in dataloader_dict['csid'].keys()},
            'csid_labels': {k: None
                            for k in dataloader_dict['csid'].keys()},
        }

        # perform hyperparameter search if have not done so
        if (self.postprocessor.APS_mode
                and not self.postprocessor.hyperparam_search_done):
            self.hyperparam_search()

        self.net.eval()

    # TODO: how to ensure the postprocessors can work with
    # models whose definition doesn't align with OpenOOD
def _classifier_inference(self,
data_loader: DataLoader,
msg: str = 'Acc Eval',
progress: bool = True):
self.net.eval()
all_preds = []
all_labels = []
with torch.no_grad():
for batch in tqdm(data_loader, desc=msg, disable=not progress):
data = batch['data'].cuda()
logits = self.net(data)
preds = logits.argmax(1)
all_preds.append(preds.cpu())
all_labels.append(batch['label'])
all_preds = torch.cat(all_preds)
all_labels = torch.cat(all_labels)
return all_preds, all_labels
def eval_acc(self, data_name: str = 'id') -> float:
if data_name == 'id':
if self.metrics['id_acc'] is not None:
return self.metrics['id_acc']
else:
if self.scores['id_preds'] is None:
all_preds, all_labels = self._classifier_inference(
self.dataloader_dict['id']['test'], 'ID Acc Eval')
self.scores['id_preds'] = all_preds
self.scores['id_labels'] = all_labels
else:
all_preds = self.scores['id_preds']
all_labels = self.scores['id_labels']
assert len(all_preds) == len(all_labels)
correct = (all_preds == all_labels).sum().item()
acc = correct / len(all_labels) * 100
self.metrics['id_acc'] = acc
return acc
elif data_name == 'csid':
if self.metrics['csid_acc'] is not None:
return self.metrics['csid_acc']
else:
correct, total = 0, 0
for _, (dataname, dataloader) in enumerate(
self.dataloader_dict['csid'].items()):
if self.scores['csid_preds'][dataname] is None:
all_preds, all_labels = self._classifier_inference(
dataloader, f'CSID {dataname} Acc Eval')
self.scores['csid_preds'][dataname] = all_preds
self.scores['csid_labels'][dataname] = all_labels
else:
all_preds = self.scores['csid_preds'][dataname]
all_labels = self.scores['csid_labels'][dataname]
assert len(all_preds) == len(all_labels)
c = (all_preds == all_labels).sum().item()
t = len(all_labels)
correct += c
total += t
if self.scores['id_preds'] is None:
all_preds, all_labels = self._classifier_inference(
self.dataloader_dict['id']['test'], 'ID Acc Eval')
self.scores['id_preds'] = all_preds
self.scores['id_labels'] = all_labels
else:
all_preds = self.scores['id_preds']
all_labels = self.scores['id_labels']
correct += (all_preds == all_labels).sum().item()
total += len(all_labels)
acc = correct / total * 100
self.metrics['csid_acc'] = acc
return acc
else:
raise ValueError(f'Unknown data name {data_name}')
    def eval_ood(self, fsood: bool = False, progress: bool = True):
        """Run the full OOD evaluation over the near and far splits and
        return (and cache) the resulting metrics table (values in %).

        When `fsood` is True, the covariate-shifted ID sets are pooled
        into the ID scores and accuracy is computed over the pooled set.
        """
        id_name = 'id' if not fsood else 'csid'
        task = 'ood' if not fsood else 'fsood'
        if self.metrics[task] is None:
            self.net.eval()

            # id score
            if self.scores['id']['test'] is None:
                print(f'Performing inference on {self.id_name} test set...',
                      flush=True)
                id_pred, id_conf, id_gt = self.postprocessor.inference(
                    self.net, self.dataloader_dict['id']['test'], progress)
                self.scores['id']['test'] = [id_pred, id_conf, id_gt]
            else:
                id_pred, id_conf, id_gt = self.scores['id']['test']

            if fsood:
                # append scores from every covariate-shifted ID set so they
                # count as in-distribution for the OOD metrics
                csid_pred, csid_conf, csid_gt = [], [], []
                for i, dataset_name in enumerate(self.scores['csid'].keys()):
                    if self.scores['csid'][dataset_name] is None:
                        print(
                            f'Performing inference on {self.id_name} '
                            f'(cs) test set [{i+1}]: {dataset_name}...',
                            flush=True)
                        temp_pred, temp_conf, temp_gt = \
                            self.postprocessor.inference(
                                self.net,
                                self.dataloader_dict['csid'][dataset_name],
                                progress)
                        self.scores['csid'][dataset_name] = [
                            temp_pred, temp_conf, temp_gt
                        ]

                    csid_pred.append(self.scores['csid'][dataset_name][0])
                    csid_conf.append(self.scores['csid'][dataset_name][1])
                    csid_gt.append(self.scores['csid'][dataset_name][2])

                csid_pred = np.concatenate(csid_pred)
                csid_conf = np.concatenate(csid_conf)
                csid_gt = np.concatenate(csid_gt)

                id_pred = np.concatenate((id_pred, csid_pred))
                id_conf = np.concatenate((id_conf, csid_conf))
                id_gt = np.concatenate((id_gt, csid_gt))

            # load nearood data and compute ood metrics
            near_metrics = self._eval_ood([id_pred, id_conf, id_gt],
                                          ood_split='near',
                                          progress=progress)
            # load farood data and compute ood metrics
            far_metrics = self._eval_ood([id_pred, id_conf, id_gt],
                                         ood_split='far',
                                         progress=progress)

            # the ACC column holds the ID accuracy, identical on every row
            if self.metrics[f'{id_name}_acc'] is None:
                self.eval_acc(id_name)
            near_metrics[:, -1] = np.array([self.metrics[f'{id_name}_acc']] *
                                           len(near_metrics))
            far_metrics[:, -1] = np.array([self.metrics[f'{id_name}_acc']] *
                                          len(far_metrics))

            self.metrics[task] = pd.DataFrame(
                np.concatenate([near_metrics, far_metrics], axis=0),
                index=list(self.dataloader_dict['ood']['near'].keys()) +
                ['nearood'] + list(self.dataloader_dict['ood']['far'].keys()) +
                ['farood'],
                columns=[
                    'FPR@95', 'AUROC', 'AUPR_IN', 'AUPR_OUT', 'CCR_4', 'CCR_3',
                    'CCR_2', 'CCR_1', 'ACC'
                ],
            )
        else:
            print('Evaluation has already been done!')

        with pd.option_context(
                'display.max_rows', None, 'display.max_columns', None,
                'display.float_format',
                '{:,.2f}'.format):  # more options can be specified also
            print(self.metrics[task])

        return self.metrics[task]
def _eval_ood(self,
id_list: List[np.ndarray],
ood_split: str = 'near',
progress: bool = True):
print(f'Processing {ood_split} ood...', flush=True)
[id_pred, id_conf, id_gt] = id_list
metrics_list = []
for dataset_name, ood_dl in self.dataloader_dict['ood'][
ood_split].items():
if self.scores['ood'][ood_split][dataset_name] is None:
print(f'Performing inference on {dataset_name} dataset...',
flush=True)
ood_pred, ood_conf, ood_gt = self.postprocessor.inference(
self.net, ood_dl, progress)
self.scores['ood'][ood_split][dataset_name] = [
ood_pred, ood_conf, ood_gt
]
else:
print(
'Inference has been performed on '
f'{dataset_name} dataset...',
flush=True)
[ood_pred, ood_conf,
ood_gt] = self.scores['ood'][ood_split][dataset_name]
ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood
pred = np.concatenate([id_pred, ood_pred])
conf = np.concatenate([id_conf, ood_conf])
label = np.concatenate([id_gt, ood_gt])
print(f'Computing metrics on {dataset_name} dataset...')
ood_metrics = compute_all_metrics(conf, label, pred)
metrics_list.append(ood_metrics)
self._print_metrics(ood_metrics)
print('Computing mean metrics...', flush=True)
metrics_list = np.array(metrics_list)
metrics_mean = np.mean(metrics_list, axis=0, keepdims=True)
self._print_metrics(list(metrics_mean[0]))
return np.concatenate([metrics_list, metrics_mean], axis=0) * 100
def _print_metrics(self, metrics):
[fpr, auroc, aupr_in, aupr_out,
ccr_4, ccr_3, ccr_2, ccr_1, _] \
= metrics
# print ood metric results
print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc),
end=' ',
flush=True)
print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format(
100 * aupr_in, 100 * aupr_out),
flush=True)
print('CCR: {:.2f}, {:.2f}, {:.2f}, {:.2f},'.format(
ccr_4 * 100, ccr_3 * 100, ccr_2 * 100, ccr_1 * 100),
end=' ',
flush=True)
print(u'\u2500' * 70, flush=True)
print('', flush=True)
def hyperparam_search(self):
print('Starting automatic parameter search...')
max_auroc = 0
hyperparam_names = []
hyperparam_list = []
count = 0
for name in self.postprocessor.args_dict.keys():
hyperparam_names.append(name)
count += 1
for name in hyperparam_names:
hyperparam_list.append(self.postprocessor.args_dict[name])
hyperparam_combination = self.recursive_generator(
hyperparam_list, count)
final_index = None
for i, hyperparam in enumerate(hyperparam_combination):
self.postprocessor.set_hyperparam(hyperparam)
id_pred, id_conf, id_gt = self.postprocessor.inference(
self.net, self.dataloader_dict['id']['val'])
ood_pred, ood_conf, ood_gt = self.postprocessor.inference(
self.net, self.dataloader_dict['ood']['val'])
ood_gt = -1 * np.ones_like(ood_gt) # hard set to -1 as ood
pred = np.concatenate([id_pred, ood_pred])
conf = np.concatenate([id_conf, ood_conf])
label = np.concatenate([id_gt, ood_gt])
ood_metrics = compute_all_metrics(conf, label, pred)
auroc = ood_metrics[1]
print('Hyperparam: {}, auroc: {}'.format(hyperparam, auroc))
if auroc > max_auroc:
final_index = i
max_auroc = auroc
self.postprocessor.set_hyperparam(hyperparam_combination[final_index])
print('Final hyperparam: {}'.format(
self.postprocessor.get_hyperparam()))
self.postprocessor.hyperparam_search_done = True
def recursive_generator(self, list, n):
if n == 1:
results = []
for x in list[0]:
k = []
k.append(x)
results.append(k)
return results
else:
results = []
temp = self.recursive_generator(list, n - 1)
for x in list[n - 1]:
for y in temp:
k = y.copy()
k.append(x)
results.append(k)
return results
| 18,100 | 39.676404 | 79 | py |
null | OpenOOD-main/openood/evaluation_api/postprocessor.py | import os
import urllib.request
from openood.postprocessors import (
ASHPostprocessor, BasePostprocessor, ConfBranchPostprocessor,
CutPastePostprocessor, DICEPostprocessor, DRAEMPostprocessor,
DropoutPostProcessor, DSVDDPostprocessor, EBOPostprocessor,
EnsemblePostprocessor, GMMPostprocessor, GodinPostprocessor,
GradNormPostprocessor, GRAMPostprocessor, KLMatchingPostprocessor,
KNNPostprocessor, MaxLogitPostprocessor, MCDPostprocessor,
MDSPostprocessor, MDSEnsemblePostprocessor, MOSPostprocessor,
ODINPostprocessor, OpenGanPostprocessor, OpenMax, PatchcorePostprocessor,
Rd4adPostprocessor, ReactPostprocessor, ResidualPostprocessor,
SSDPostprocessor, TemperatureScalingPostprocessor, VIMPostprocessor,
RotPredPostprocessor, RankFeatPostprocessor, RMDSPostprocessor,
SHEPostprocessor, CIDERPostprocessor, NPOSPostprocessor)
from openood.utils.config import Config, merge_configs
# Registry mapping postprocessor names (as used in config files) to their
# implementing classes. NOTE: the original dict listed 'mds' twice with the
# same value; the redundant duplicate key has been removed.
postprocessors = {
    'ash': ASHPostprocessor,
    'cider': CIDERPostprocessor,
    'conf_branch': ConfBranchPostprocessor,
    'msp': BasePostprocessor,
    'ebo': EBOPostprocessor,
    'odin': ODINPostprocessor,
    'mds': MDSPostprocessor,
    'mds_ensemble': MDSEnsemblePostprocessor,
    'npos': NPOSPostprocessor,
    'rmds': RMDSPostprocessor,
    'gmm': GMMPostprocessor,
    'patchcore': PatchcorePostprocessor,
    'openmax': OpenMax,
    'react': ReactPostprocessor,
    'vim': VIMPostprocessor,
    'gradnorm': GradNormPostprocessor,
    'godin': GodinPostprocessor,
    'gram': GRAMPostprocessor,
    'cutpaste': CutPastePostprocessor,
    'mls': MaxLogitPostprocessor,
    'residual': ResidualPostprocessor,
    'klm': KLMatchingPostprocessor,
    'temp_scaling': TemperatureScalingPostprocessor,
    'ensemble': EnsemblePostprocessor,
    'dropout': DropoutPostProcessor,
    'draem': DRAEMPostprocessor,
    'dsvdd': DSVDDPostprocessor,
    'mos': MOSPostprocessor,
    'mcd': MCDPostprocessor,
    'opengan': OpenGanPostprocessor,
    'knn': KNNPostprocessor,
    'dice': DICEPostprocessor,
    'ssd': SSDPostprocessor,
    'she': SHEPostprocessor,
    'rd4ad': Rd4adPostprocessor,
    'rotpred': RotPredPostprocessor,
    'rankfeat': RankFeatPostprocessor
}
# Remote location of the default postprocessor configs, fetched on demand.
link_prefix = 'https://raw.githubusercontent.com/Jingkang50/OpenOOD/main/configs/postprocessors/'


def get_postprocessor(config_root: str, postprocessor_name: str,
                      id_data_name: str):
    """Instantiate a postprocessor by name.

    Downloads the default YAML config from the OpenOOD repository when it
    is not already present under ``config_root``.
    """
    postprocessor_config_path = os.path.join(config_root, 'postprocessors',
                                             f'{postprocessor_name}.yml')
    if not os.path.exists(postprocessor_config_path):
        # Fetch the canonical config from GitHub and cache it locally.
        os.makedirs(os.path.dirname(postprocessor_config_path), exist_ok=True)
        urllib.request.urlretrieve(link_prefix + f'{postprocessor_name}.yml',
                                   postprocessor_config_path)
    # Overlay the ID dataset name onto the loaded config.
    config = merge_configs(Config(postprocessor_config_path),
                           Config(**{'dataset': {
                               'name': id_data_name
                           }}))
    postprocessor = postprocessors[postprocessor_name](config)
    postprocessor.APS_mode = config.postprocessor.APS_mode
    postprocessor.hyperparam_search_done = False
    return postprocessor
| 3,284 | 39.555556 | 97 | py |
null | OpenOOD-main/openood/evaluation_api/preprocessor.py | import torchvision.transforms as tvs_trans
from openood.preprocessors import BasePreprocessor
from openood.utils import Config
INTERPOLATION = tvs_trans.InterpolationMode.BILINEAR
# Per-dataset test-time preprocessing parameters:
#   pre_size: resize target (shorter side / square) applied before cropping
#   img_size: final center-crop size fed to the network
#   normalization: per-channel [mean, std] used by Normalize
default_preprocessing_dict = {
    'cifar10': {
        'pre_size': 32,
        'img_size': 32,
        'normalization': [[0.4914, 0.4822, 0.4465], [0.2470, 0.2435, 0.2616]],
    },
    'cifar100': {
        'pre_size': 32,
        'img_size': 32,
        'normalization': [[0.5071, 0.4867, 0.4408], [0.2675, 0.2565, 0.2761]],
    },
    'imagenet': {
        'pre_size': 256,
        'img_size': 224,
        'normalization': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]],
    },
    'imagenet200': {
        'pre_size': 256,
        'img_size': 224,
        'normalization': [[0.485, 0.456, 0.406], [0.229, 0.224, 0.225]],
    },
    'aircraft': {
        'pre_size': 512,
        'img_size': 448,
        'normalization': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
    },
    'cub': {
        'pre_size': 512,
        'img_size': 448,
        'normalization': [[0.5, 0.5, 0.5], [0.5, 0.5, 0.5]],
    }
}
class Convert:
    """Callable transform that converts an image to a fixed mode.

    Defaults to 'RGB' so grayscale/palette inputs gain three channels
    before tensor conversion.
    """
    def __init__(self, mode='RGB'):
        self.mode = mode

    def __call__(self, image):
        # Delegates to the image object's own convert() (PIL-style API).
        return image.convert(self.mode)
class TestStandardPreProcessor(BasePreprocessor):
    """For test and validation dataset standard image transformation."""
    def __init__(self, config: Config):
        # Deterministic pipeline: force RGB, resize, center-crop, then
        # convert to tensor and normalize with dataset statistics.
        steps = [
            Convert('RGB'),
            tvs_trans.Resize(config.pre_size, interpolation=INTERPOLATION),
            tvs_trans.CenterCrop(config.img_size),
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(*config.normalization),
        ]
        self.transform = tvs_trans.Compose(steps)
class ImageNetCPreProcessor(BasePreprocessor):
    """Minimal preprocessor: tensor conversion + normalization only.

    No resize/crop is applied — presumably ImageNet-C images are already
    the expected size; confirm against the caller.
    """
    def __init__(self, mean, std):
        self.transform = tvs_trans.Compose([
            tvs_trans.ToTensor(),
            tvs_trans.Normalize(mean, std),
        ])
def get_default_preprocessor(data_name: str):
    """Build the standard test-time preprocessor for a supported dataset.

    Raises:
        NotImplementedError: if ``data_name`` has no default parameters.
    """
    # TODO: include fine-grained datasets proposed in Vaze et al.?
    if data_name not in default_preprocessing_dict:
        raise NotImplementedError(f'The dataset {data_name} is not supported')
    params = default_preprocessing_dict[data_name]
    return TestStandardPreProcessor(Config(**params))
| 2,304 | 27.8125 | 78 | py |
null | OpenOOD-main/openood/evaluators/__init__.py | from .utils import get_evaluator
| 33 | 16 | 32 | py |
null | OpenOOD-main/openood/evaluators/ad_evaluator.py | import numpy as np
import torch
from sklearn.metrics import auc, roc_curve
from openood.utils import Config
class ADEvaluator():
    """Evaluator for anomaly-detection: reports image-level AUROC of ID test
    data versus the OOD validation split."""
    def __init__(self, config: Config):
        self.config = config

    def eval_ood(self,
                 net,
                 id_data_loader,
                 ood_data_loaders,
                 postprocessor,
                 epoch_idx: int = -1):
        """Compute image-level AUROC with the network(s) in eval mode.

        Args:
            net: a single module or a dict of sub-modules.
            id_data_loader: dict holding the 'test' loader for ID data.
            ood_data_loaders: dict holding the 'val' loader for OOD data.
            postprocessor: scoring function wrapper with an inference() API.
            epoch_idx: epoch tag recorded alongside the metric.

        Returns:
            dict with 'epoch_idx' and 'image_auroc'.
        """
        with torch.no_grad():
            # net may be a dict of sub-networks; put every part in eval mode.
            if type(net) is dict:
                for subnet in net.values():
                    subnet.eval()
            else:
                net.eval()
            auroc = self.get_auroc(net, id_data_loader['test'],
                                   ood_data_loaders['val'], postprocessor)
            metrics = {
                'epoch_idx': epoch_idx,
                'image_auroc': auroc,
            }
            return metrics

    def report(self, test_metrics):
        """Print the final AUROC summary for the configured dataset."""
        print('Complete Evaluation:\n'
              '{}\n'
              '==============================\n'
              'AUC Image: {:.2f} \n'
              '=============================='.format(
                  self.config.dataset.name,
                  100.0 * test_metrics['image_auroc']),
              flush=True)
        print('Completed!', flush=True)

    def get_auroc(self, net, id_data_loader, ood_data_loader, postprocessor):
        """ROC-AUC of postprocessor confidences, ID (positive) vs OOD."""
        _, id_conf, id_gt = postprocessor.inference(net, id_data_loader)
        _, ood_conf, ood_gt = postprocessor.inference(net, ood_data_loader)
        ood_gt = -1 * np.ones_like(ood_gt)  # hard set to -1 as ood
        conf = np.concatenate([id_conf, ood_conf])
        label = np.concatenate([id_gt, ood_gt])
        # Binary indicator: 1 for ID samples, 0 for OOD.
        ind_indicator = np.zeros_like(label)
        ind_indicator[label != -1] = 1
        fpr, tpr, _ = roc_curve(ind_indicator, conf)
        auroc = auc(fpr, tpr)
        return auroc
| 1,848 | 29.816667 | 77 | py |
null | OpenOOD-main/openood/evaluators/arpl_evaluator.py | from typing import Dict
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
from .ood_evaluator import OODEvaluator
class ARPLEvaluator(OODEvaluator):
    """Evaluator for ARPL models, where `net` is a dict holding the feature
    network ('netF') and the ARPL criterion ('criterion') that produces
    logits from features."""
    def __init__(self, config: Config):
        self.config = config

    def eval_acc(self,
                 net: dict,
                 data_loader: DataLoader,
                 postprocessor: BasePostprocessor = None,
                 epoch_idx: int = -1,
                 fsood: bool = False,
                 csid_data_loaders: Dict[str, DataLoader] = None):
        """Classification accuracy (and loss) on ID data; with ``fsood`` the
        accuracy is pooled over the additional covariate-shifted ID loaders.

        Returns:
            dict with 'epoch_idx', 'acc' and (non-fsood only) 'loss'.
        """
        criterion = net['criterion']
        net = net['netF']
        net.eval()

        loss_avg = 0.0
        correct = 0
        with torch.no_grad():
            for batch in tqdm(data_loader,
                              desc='Eval: ',
                              position=0,
                              leave=True):
                # prepare data
                data = batch['data'].cuda()
                target = batch['label'].cuda()

                # forward: features from netF, logits + loss from criterion
                _, feat = net(data, return_feature=True)
                output, loss = criterion(feat, target)

                # accuracy
                pred = output.data.max(1)[1]
                correct += pred.eq(target.data).sum().item()

                # test loss average
                loss_avg += float(loss.data)

        if not fsood:
            metrics = {}
            metrics['epoch_idx'] = epoch_idx
            metrics['loss'] = loss_avg / len(data_loader)
            metrics['acc'] = correct / len(data_loader.dataset)
            return metrics
        else:
            # Pool correct counts over the main loader and every csid loader.
            all_correct = 0
            all_total = 0
            all_correct += correct
            all_total += len(data_loader.dataset)

            assert csid_data_loaders is not None
            for dataset_name, csid_dl in csid_data_loaders.items():
                correct = 0
                with torch.no_grad():
                    for batch in tqdm(csid_dl,
                                      desc='Eval: ',
                                      position=0,
                                      leave=True):
                        # prepare data
                        data = batch['data'].cuda()
                        target = batch['label'].cuda()

                        # forward
                        _, feat = net(data, return_feature=True)
                        output, loss = criterion(feat, target)

                        # accuracy
                        pred = output.data.max(1)[1]
                        correct += pred.eq(target.data).sum().item()

                all_correct += correct
                all_total += len(csid_dl.dataset)

            metrics = {}
            metrics['epoch_idx'] = epoch_idx
            metrics['acc'] = all_correct / all_total
            return metrics

    def eval_ood(self,
                 net: dict,
                 id_data_loader: DataLoader,
                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                 postprocessor: BasePostprocessor,
                 fsood: bool = False):
        """Full OOD evaluation: chains netF and the criterion into a single
        module, scores ID (optionally + csid) data, then delegates to
        ``_eval_ood`` for the near/far OOD splits."""
        criterion = net['criterion']
        net = net['netF']
        # Wrap feature extractor + criterion so the postprocessor sees a
        # single module producing logits.
        net = nn.Sequential(
            net,
            criterion,
        )
        net.eval()

        # load training in-distribution data
        assert 'test' in id_data_loader, \
            'id_data_loaders should have the key: test!'
        dataset_name = self.config.dataset.name
        print(f'Performing inference on {dataset_name} dataset...', flush=True)
        id_pred, id_conf, id_gt = postprocessor.inference(
            net, id_data_loader['test'])
        if self.config.recorder.save_scores:
            self._save_scores(id_pred, id_conf, id_gt, dataset_name)

        if fsood:
            # load csid data and compute confidence
            for dataset_name, csid_dl in ood_data_loaders['csid'].items():
                print(f'Performing inference on {dataset_name} dataset...',
                      flush=True)
                csid_pred, csid_conf, csid_gt = postprocessor.inference(
                    net, csid_dl)
                if self.config.recorder.save_scores:
                    self._save_scores(csid_pred, csid_conf, csid_gt,
                                      dataset_name)
                id_pred = np.concatenate([id_pred, csid_pred])
                id_conf = np.concatenate([id_conf, csid_conf])
                id_gt = np.concatenate([id_gt, csid_gt])

        # load nearood data and compute ood metrics
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='nearood')
        # load farood data and compute ood metrics
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='farood')
| 5,048 | 35.064286 | 79 | py |
null | OpenOOD-main/openood/evaluators/base_evaluator.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
def to_np(x):
    """Move a tensor to CPU (detached from autograd) as a NumPy array."""
    return x.detach().cpu().numpy()
class BaseEvaluator:
    """Default evaluator: ID accuracy/loss and feature extraction, with
    metric reduction across distributed workers via ``comm.gather``."""
    def __init__(self, config: Config):
        self.config = config

    def eval_acc(self,
                 net: nn.Module,
                 data_loader: DataLoader,
                 postprocessor: BasePostprocessor = None,
                 epoch_idx: int = -1):
        """Compute cross-entropy loss and top-1 accuracy over ``data_loader``.

        Returns:
            dict with 'epoch_idx', 'loss', 'acc' (loss/acc summed across
            distributed processes by save_metrics).
        """
        net.eval()

        loss_avg = 0.0
        correct = 0
        with torch.no_grad():
            for batch in tqdm(data_loader,
                              desc='Eval: ',
                              position=0,
                              leave=True,
                              disable=not comm.is_main_process()):
                # prepare data
                data = batch['data'].cuda()
                target = batch['label'].cuda()

                # forward
                output = net(data)
                loss = F.cross_entropy(output, target)

                # accuracy
                pred = output.data.max(1)[1]
                correct += pred.eq(target.data).sum().item()

                # test loss average
                loss_avg += float(loss.data)

        loss = loss_avg / len(data_loader)
        acc = correct / len(data_loader.dataset)

        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss)
        metrics['acc'] = self.save_metrics(acc)
        return metrics

    def extract(self,
                net: nn.Module,
                data_loader: DataLoader,
                filename: str = 'feature'):
        """Extract penultimate features and labels, saving them as an .npz
        archive (keys: feat_list, label_list) under the output dir."""
        net.eval()

        feat_list, label_list = [], []
        with torch.no_grad():
            for batch in tqdm(data_loader,
                              desc='Feature Extracting: ',
                              position=0,
                              leave=True,
                              disable=not comm.is_main_process()):
                data = batch['data'].cuda()
                label = batch['label']

                _, feat = net(data, return_feature=True)
                feat_list.extend(to_np(feat))
                label_list.extend(to_np(label))

        feat_list = np.array(feat_list)
        label_list = np.array(label_list)

        save_dir = self.config.output_dir
        os.makedirs(save_dir, exist_ok=True)
        np.savez(os.path.join(save_dir, filename),
                 feat_list=feat_list,
                 label_list=label_list)

    def save_metrics(self, value):
        """Sum a scalar metric across all distributed processes."""
        all_values = comm.gather(value)
        temp = 0
        for i in all_values:
            temp = temp + i

        # NOTE: returns the SUM across processes, not the mean.
        return temp
| 2,926 | 28.27 | 66 | py |
null | OpenOOD-main/openood/evaluators/ece_evaluator.py | import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
from .base_evaluator import BaseEvaluator
class ECEEvaluator(BaseEvaluator):
    """Evaluator that additionally reports the Expected Calibration Error."""
    def __init__(self, config: Config):
        """ECE Evaluator.

        Args:
            config (Config): experiment configuration.
        """
        super(ECEEvaluator, self).__init__(config)

    def eval_acc(self,
                 net: nn.Module,
                 data_loader: DataLoader,
                 postprocessor: BasePostprocessor = None,
                 epoch_idx: int = -1,
                 num_bins: int = 15):
        """Compute loss, accuracy and ECE over ``data_loader``.

        Args:
            num_bins: number of confidence bins used for ECE (default 15).

        Returns:
            dict with 'epoch_idx', 'loss', 'acc', 'ece'.
        """
        net.eval()
        loss_avg = 0.0
        correct = 0
        total_scores = []
        total_preds = []
        total_labels = []
        with torch.no_grad():
            for batch in tqdm(data_loader,
                              desc='Eval: ',
                              position=0,
                              leave=True):
                # prepare data
                data = batch['data'].cuda()
                target = batch['label'].cuda()

                # forward
                output = net(data)
                loss = F.cross_entropy(output, target)

                # accuracy
                pred = output.data.max(1)[1]
                score = output.data.max(1)[0]
                correct += pred.eq(target.data).sum().item()

                # test loss average
                loss_avg += float(loss.data)
                total_preds.append(pred.cpu().numpy().reshape(-1))
                total_scores.append(score.cpu().numpy().reshape(-1))
                total_labels.append(target.data.cpu().numpy().reshape(-1))

        # FIX: use concatenate instead of np.reshape(list, -1) — reshape
        # fails on a ragged final batch (dataset size not divisible by
        # batch size) under modern NumPy.
        scores_np = np.concatenate(total_scores)
        preds_np = np.concatenate(total_preds)
        labels_np = np.concatenate(total_labels)

        acc_tab = np.zeros(num_bins)  # Empirical (true) confidence
        mean_conf = np.zeros(num_bins)  # Predicted confidence
        nb_items_bin = np.zeros(num_bins)  # Number of items in the bins
        tau_tab = np.linspace(0, 1, num_bins + 1)  # Confidence bins
        for i in np.arange(num_bins):  # Iterates over the bins
            # Selects the items where the predicted max probability falls in
            # the bin [tau_tab[i], tau_tab[i + 1])
            sec = (tau_tab[i + 1] > scores_np) & (scores_np >= tau_tab[i])
            nb_items_bin[i] = np.sum(sec)  # Number of items in the bin
            # Selects the predicted classes, and the true classes
            class_pred_sec, y_sec = preds_np[sec], labels_np[sec]
            # Averages of the predicted max probabilities
            mean_conf[i] = np.mean(
                scores_np[sec]) if nb_items_bin[i] > 0 else np.nan
            # Computes the empirical confidence
            acc_tab[i] = np.mean(
                class_pred_sec == y_sec) if nb_items_bin[i] > 0 else np.nan

        # Cleaning: drop empty bins.
        mean_conf = mean_conf[nb_items_bin > 0]
        acc_tab = acc_tab[nb_items_bin > 0]
        nb_items_bin = nb_items_bin[nb_items_bin > 0]

        if sum(nb_items_bin) != 0:
            # FIX: np.float was removed in NumPy 1.24; use builtin float.
            ece = np.average(
                np.absolute(mean_conf - acc_tab),
                weights=nb_items_bin.astype(float) / np.sum(nb_items_bin))
        else:
            ece = 0.0

        loss = loss_avg / len(data_loader)
        acc = correct / len(data_loader.dataset)
        metrics = {}
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = self.save_metrics(loss)
        metrics['acc'] = self.save_metrics(acc)
        metrics['ece'] = self.save_metrics(ece)
        return metrics
| 3,959 | 35.330275 | 84 | py |
null | OpenOOD-main/openood/evaluators/fsood_evaluator.py | import csv
import os
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from openood.postprocessors import BasePostprocessor
from .ood_evaluator import OODEvaluator
class FSOODEvaluator(OODEvaluator):
    """Full-spectrum OOD evaluator: treats covariate-shifted ID (csid) data
    as in-distribution when computing OOD metrics."""
    def eval_csid_acc(self, net: nn.Module,
                      csid_loaders: Dict[str, Dict[str, DataLoader]]):
        """Report classification accuracy on every csid loader (optionally
        saved to csid.csv)."""
        # ensure the networks in eval mode
        net.eval()

        for dataset_name, csid_dl in csid_loaders.items():
            print(f'Computing accuracy on {dataset_name} dataset...')
            correct = 0
            with torch.no_grad():
                for batch in csid_dl:
                    data = batch['data'].cuda()
                    target = batch['label'].cuda()

                    # forward
                    output = net(data)

                    # accuracy
                    pred = output.data.max(1)[1]
                    correct += pred.eq(target.data).sum().item()
            acc = correct / len(csid_dl.dataset)
            if self.config.recorder.save_csv:
                self._save_acc_results(acc, dataset_name)
        print(u'\u2500' * 70, flush=True)

    def _save_acc_results(self, acc, dataset_name):
        """Append one accuracy-only row (OOD columns dashed) to csid.csv."""
        write_content = {
            'dataset': dataset_name,
            'FPR@95': '-',
            'AUROC': '-',
            'AUPR_IN': '-',
            'AUPR_OUT': '-',
            'CCR_4': '-',
            'CCR_3': '-',
            'CCR_2': '-',
            'CCR_1': '-',
            'ACC': '{:.2f}'.format(100 * acc),
        }
        fieldnames = list(write_content.keys())

        # print csid metric results
        print('CSID[{}] accuracy: {:.2f}%'.format(dataset_name, 100 * acc),
              flush=True)
        csv_path = os.path.join(self.config.output_dir, 'csid.csv')
        # Write the header only when creating the file for the first time.
        if not os.path.exists(csv_path):
            with open(csv_path, 'w', newline='') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerow(write_content)
        else:
            with open(csv_path, 'a', newline='') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writerow(write_content)

    def eval_ood(self, net: nn.Module, id_data_loader: List[DataLoader],
                 ood_data_loaders: List[DataLoader],
                 postprocessor: BasePostprocessor):
        """Score ID + csid data as in-distribution, then evaluate the
        nearood and farood splits against the pooled ID scores."""
        # ensure the networks in eval mode
        net.eval()
        # load training in-distribution data
        assert 'test' in id_data_loader, \
            'id_data_loaders should have the key: test!'
        dataset_name = self.config.dataset.name
        print(f'Performing inference on {dataset_name} dataset...', flush=True)
        id_pred, id_conf, id_gt = postprocessor.inference(
            net, id_data_loader['test'])
        if self.config.recorder.save_scores:
            self._save_scores(id_pred, id_conf, id_gt, dataset_name)

        # load csid data and compute confidence
        for dataset_name, csid_dl in ood_data_loaders['csid'].items():
            print(f'Performing inference on {dataset_name} dataset...',
                  flush=True)
            csid_pred, csid_conf, csid_gt = postprocessor.inference(
                net, csid_dl)
            if self.config.recorder.save_scores:
                self._save_scores(csid_pred, csid_conf, csid_gt, dataset_name)
            # csid samples count as in-distribution for FS-OOD metrics.
            id_pred = np.concatenate([id_pred, csid_pred])
            id_conf = np.concatenate([id_conf, csid_conf])
            id_gt = np.concatenate([id_gt, csid_gt])

        # compute accuracy on csid
        print(u'\u2500' * 70, flush=True)
        self.eval_csid_acc(net, ood_data_loaders['csid'])

        # load nearood data and compute ood metrics
        print(u'\u2500' * 70, flush=True)
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='nearood')
        # load farood data and compute ood metrics
        print(u'\u2500' * 70, flush=True)
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='farood')
| 4,310 | 38.550459 | 79 | py |
null | OpenOOD-main/openood/evaluators/metrics.py | import numpy as np
from sklearn import metrics
def compute_all_metrics(conf, label, pred):
    """Compute the standard suite of OOD metrics.

    Args:
        conf: confidence scores; higher means more in-distribution.
        label: ground-truth class labels, with OOD samples set to -1.
        pred: predicted class indices.

    Returns:
        list: [fpr@95tpr, auroc, aupr_in, aupr_out,
               ccr@1e-4, ccr@1e-3, ccr@1e-2, ccr@1e-1, accuracy]
    """
    np.set_printoptions(precision=3)
    recall = 0.95
    fpr, thresh = fpr_recall(conf, label, recall)
    auroc, aupr_in, aupr_out = auc(conf, label)
    # CCR at progressively tighter false-positive budgets.
    ccr_1 = ccr_fpr(conf, 0.1, pred, label)
    ccr_2 = ccr_fpr(conf, 0.01, pred, label)
    ccr_3 = ccr_fpr(conf, 0.001, pred, label)
    ccr_4 = ccr_fpr(conf, 0.0001, pred, label)

    accuracy = acc(pred, label)

    # (removed an unused intermediate np.array copy of the same values)
    results = [
        fpr, auroc, aupr_in, aupr_out, ccr_4, ccr_3, ccr_2, ccr_1, accuracy
    ]

    return results
# accuracy
def acc(pred, label):
    """Top-1 accuracy over in-distribution samples only (label != -1)."""
    id_mask = label != -1
    num_correct = np.sum(pred[id_mask] == label[id_mask])
    return num_correct / np.sum(id_mask)
# fpr_recall
def fpr_recall(conf, label, tpr):
    """FPR and threshold at the first operating point reaching ``tpr`` TPR.

    Args:
        conf: confidence scores (higher = more in-distribution).
        label: class labels with OOD samples marked -1 (negatives).
        tpr: target true-positive rate, e.g. 0.95.

    Returns:
        (fpr, threshold) at that operating point.
    """
    # Binarize: ID -> 1 (positive), OOD -> 0 (negative).
    gt = np.ones_like(label)
    gt[label == -1] = 0

    fpr_list, tpr_list, threshold_list = metrics.roc_curve(gt, conf)
    # First index whose TPR meets the requested recall.
    idx = np.argmax(tpr_list >= tpr)
    fpr = fpr_list[idx]
    thresh = threshold_list[idx]
    return fpr, thresh
# auc
def auc(conf, label):
    """AUROC, AUPR-in and AUPR-out; OOD samples are marked label == -1."""
    # ID -> 1, OOD -> 0.
    ind_indicator = np.zeros_like(label)
    ind_indicator[label != -1] = 1

    fpr, tpr, _ = metrics.roc_curve(ind_indicator, conf)
    precision_in, recall_in, _ = metrics.precision_recall_curve(
        ind_indicator, conf)
    # AUPR-out flips both labels and scores so OOD becomes the positive class.
    precision_out, recall_out, _ = metrics.precision_recall_curve(
        1 - ind_indicator, 1 - conf)

    auroc = metrics.auc(fpr, tpr)
    aupr_in = metrics.auc(recall_in, precision_in)
    aupr_out = metrics.auc(recall_out, precision_out)
    return auroc, aupr_in, aupr_out
# ccr_fpr
def ccr_fpr(conf, fpr, pred, label):
    """Correct classification rate of ID samples at a given OOD FPR budget."""
    id_mask = label != -1
    ind_conf = conf[id_mask]
    ind_pred = pred[id_mask]
    ind_label = label[id_mask]
    ood_conf = conf[~id_mask]

    # Threshold set so that ceil(fpr * #OOD) OOD samples lie at/above it.
    fp_num = int(np.ceil(fpr * len(ood_conf)))
    thresh = np.sort(ood_conf)[-fp_num]
    # Count ID samples that are both accepted and correctly classified.
    num_tp = np.sum((ind_conf > thresh) & (ind_pred == ind_label))
    return num_tp / len(ind_conf)
def detection(ind_confidences,
              ood_confidences,
              n_iter=100000,
              return_data=False):
    """Scan thresholds to find the minimum detection error.

    The detection error at threshold delta averages the fraction of ID
    samples rejected (conf < delta) and OOD samples accepted (conf > delta).

    Args:
        ind_confidences: confidences of in-distribution samples.
        ood_confidences: confidences of out-of-distribution samples.
        n_iter: number of thresholds scanned across the confidence range.
        return_data: also return the full per-threshold curves.

    Returns:
        (best_error, best_delta) or
        (best_error, best_delta, all_errors, all_thresholds).
    """
    Y1 = ood_confidences
    X1 = ind_confidences

    start = np.min([np.min(X1), np.min(Y1)])
    end = np.max([np.max(X1), np.max(Y1)])
    gap = (end - start) / n_iter

    best_error = 1.0
    best_delta = None
    all_thresholds = []
    all_errors = []
    for delta in np.arange(start, end, gap):
        # FIX: np.float was removed in NumPy 1.24 — use builtin float.
        # (Also dropped a redundant nested np.sum and renamed the
        # mislabelled 'tpr' variable: it is the ID miss rate.)
        error1 = np.sum(X1 < delta) / float(len(X1))
        error2 = np.sum(Y1 > delta) / float(len(Y1))
        detection_error = (error1 + error2) / 2.0

        if return_data:
            all_thresholds.append(delta)
            all_errors.append(detection_error)

        if detection_error < best_error:
            best_error = np.minimum(best_error, detection_error)
            best_delta = delta

    if return_data:
        return best_error, best_delta, all_errors, all_thresholds
    else:
        return best_error, best_delta
| 3,520 | 25.877863 | 78 | py |
null | OpenOOD-main/openood/evaluators/mos_evaluator.py | import csv
import os
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
import openood.utils.comm as comm
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
from .base_evaluator import BaseEvaluator
from .metrics import compute_all_metrics
def topk(output, target, ks=(1, )):
    """For each k in ``ks``, return a boolean vector marking whether the
    ground-truth class appears in the model's top-k predictions."""
    kmax = max(ks)
    # (num_samples, kmax) indices, transposed to (kmax, num_samples).
    top_idx = output.topk(kmax, 1, True, True)[1].t()
    hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))
    # A sample is a top-k hit if any of its first k rows match.
    return [hits[:k].max(0)[0] for k in ks]
def get_group_slices(classes_per_group):
    """Return a (num_groups, 2) LongTensor of [start, end) logit slices.

    Each group occupies num_cls + 1 logits — the extra slot is the
    group-level 'others' category at local index 0.
    """
    slices = []
    cursor = 0
    for num_cls in classes_per_group:
        nxt = cursor + num_cls + 1
        slices.append([cursor, nxt])
        cursor = nxt
    return torch.LongTensor(slices)
def cal_ood_score(logits, group_slices):
    """MOS OOD score: max over groups of minus the 'others' probability.

    Higher (closer to 0) means more in-distribution.
    """
    per_group_scores = []
    for start, end in group_slices:
        probs = F.softmax(logits[:, start:end], dim=-1)
        # Local index 0 of each group is its 'others' (OOD) category.
        per_group_scores.append(-probs[:, 0])
    stacked = torch.stack(per_group_scores, dim=1)
    final_score, _ = torch.max(stacked, dim=1)
    return final_score.data.cpu().numpy()
def iterate_data(data_loader, model, group_slices):
    """Run the model over a loader and collect MOS OOD scores (no grad)."""
    scores = []
    loader_iter = iter(data_loader)
    with torch.no_grad():
        for _ in tqdm(range(1,
                            len(loader_iter) + 1),
                      desc='Batches',
                      position=0,
                      leave=True,
                      disable=not comm.is_main_process()):
            batch = next(loader_iter)
            data = batch['data'].cuda()
            logits = model(data)
            # One scalar OOD score per sample in the batch.
            scores.extend(cal_ood_score(logits, group_slices))
    return np.array(scores)
def calc_group_softmax_acc(logits, labels, group_slices):
    """Per-sample group-softmax loss and prediction correctness.

    Args:
        logits: (num_samples, total_logits) model outputs.
        labels: (num_samples, num_groups) group-wise targets, where entry
            [i, g] is class_index + 1 within group g, or 0 ('others').
        group_slices: (num_groups, 2) [start, end) logit spans per group.

    Returns:
        (loss, pred_acc): summed per-group CE loss vector and a boolean
        vector marking samples whose predicted group AND class are correct.
    """
    num_groups = group_slices.shape[0]
    loss = 0
    num_samples = logits.shape[0]

    all_group_max_score, all_group_max_class = [], []

    smax = torch.nn.Softmax(dim=-1).cuda()
    cri = torch.nn.CrossEntropyLoss(reduction='none').cuda()

    for i in range(num_groups):
        # Slice out this group's logits and accumulate its CE loss.
        group_logit = logits[:, group_slices[i][0]:group_slices[i][1]]
        group_label = labels[:, i]

        loss += cri(group_logit, group_label)

        group_softmax = smax(group_logit)
        group_softmax = group_softmax[:, 1:]  # disregard others category
        group_max_score, group_max_class = torch.max(group_softmax, dim=1)
        group_max_class += 1  # shift the class index by 1

        all_group_max_score.append(group_max_score)
        all_group_max_class.append(group_max_class)

    all_group_max_score = torch.stack(all_group_max_score, dim=1)
    all_group_max_class = torch.stack(all_group_max_class, dim=1)

    # Predicted group = the one with the highest in-group class probability.
    final_max_score, max_group = torch.max(all_group_max_score, dim=1)

    pred_cls_within_group = all_group_max_class[torch.arange(num_samples),
                                                max_group]

    # Ground-truth group is the one whose label entry is non-zero (max).
    gt_class, gt_group = torch.max(labels, dim=1)

    selected_groups = (max_group == gt_group)

    pred_acc = torch.zeros(logits.shape[0]).bool().cuda()

    # Correct only if both the group and the class within it match.
    pred_acc[selected_groups] = (
        pred_cls_within_group[selected_groups] == gt_class[selected_groups])

    return loss, pred_acc
def run_eval_acc(model, data_loader, group_slices, num_group):
    """Validate a MOS model, returning per-sample losses and top-1 hits.

    Args:
        model: network producing grouped logits.
        data_loader: loader yielding dicts with 'data', 'group_label',
            'class_label'.
        group_slices: (num_groups, 2) logit spans, or None for a plain
            softmax head.
        num_group: number of groups used to build one-hot-style targets.

    Returns:
        (all_c, all_top1): lists of per-sample loss values and boolean
        correctness flags (model is restored to train mode afterwards).
    """
    # switch to evaluate mode
    model.eval()

    print('Running validation...')

    all_c, all_top1 = [], []

    train_dataiter = iter(data_loader)
    for train_step in tqdm(range(1,
                                 len(train_dataiter) + 1),
                           desc='Test: ',
                           position=0,
                           leave=True,
                           disable=not comm.is_main_process()):
        batch = next(train_dataiter)
        data = batch['data'].cuda()
        group_label = batch['group_label'].cuda()
        class_label = batch['class_label'].cuda()

        # Build (num_groups,) targets per sample: class_index + 1 in the
        # ground-truth group, 0 ('others') everywhere else.
        labels = []
        for i in range(len(group_label)):
            label = torch.zeros(num_group, dtype=torch.int64)
            label[group_label[i]] = class_label[i] + 1
            labels.append(label.unsqueeze(0))
        labels = torch.cat(labels, dim=0).cuda()

        with torch.no_grad():
            # compute output, measure accuracy and record loss.
            logits = model(data)
            if group_slices is not None:
                c, top1 = calc_group_softmax_acc(logits, labels, group_slices)
            else:
                c = torch.nn.CrossEntropyLoss(reduction='none')(logits, labels)
                top1 = topk(logits, labels, ks=(1, ))[0]

            all_c.extend(c.cpu())  # Also ensures a sync point.
            all_top1.extend(top1.cpu())

    model.train()
    # all_c is val loss
    # all_top1 is val top1 acc
    return all_c, all_top1
class MOSEvaluator(BaseEvaluator):
    def __init__(self, config: Config):
        """MOS evaluator; group structure is computed lazily."""
        super(MOSEvaluator, self).__init__(config)
        self.config = config
        # Filled in by cal_group_slices() before any evaluation runs.
        self.num_groups = None
        self.group_slices = None
        # ID accuracy read by _eval_ood(); set elsewhere
        # (presumably by eval_acc — confirm).
        self.acc = None
def cal_group_slices(self, train_loader):
config = self.config
# if specified group_config
if (config.trainer.group_config.endswith('npy')):
classes_per_group = np.load(config.trainer.group_config)
elif (config.trainer.group_config.endswith('txt')):
classes_per_group = np.loadtxt(config.trainer.group_config,
dtype=int)
else:
# cal group config
config = self.config
group = {}
train_dataiter = iter(train_loader)
for train_step in tqdm(range(1,
len(train_dataiter) + 1),
desc='cal group_config',
position=0,
leave=True,
disable=not comm.is_main_process()):
batch = next(train_dataiter)
group_label = batch['group_label']
class_label = batch['class_label']
for i in range(len(class_label)):
gl = group_label[i].item()
cl = class_label[i].item()
try:
group[str(gl)]
except:
group[str(gl)] = []
if cl not in group[str(gl)]:
group[str(gl)].append(cl)
classes_per_group = []
for i in range(len(group)):
classes_per_group.append(max(group[str(i)]) + 1)
self.num_groups = len(classes_per_group)
self.group_slices = get_group_slices(classes_per_group)
self.group_slices = self.group_slices.cuda()
    def eval_ood(self,
                 net: nn.Module,
                 id_data_loader: DataLoader,
                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                 postprocessor=None,
                 fsood=False):
        """Score ID (optionally + csid) data, then evaluate both OOD splits.

        Note: pred/gt arrays for ID data are dummies — accuracy is handled
        separately (self.acc).
        """
        net.eval()
        if self.group_slices is None or self.num_groups is None:
            self.cal_group_slices(id_data_loader['train'])
        dataset_name = self.config.dataset.name

        print(f'Performing inference on {dataset_name} dataset...', flush=True)
        id_conf = iterate_data(id_data_loader['test'], net, self.group_slices)
        # dummy pred and gt
        # the accuracy will be handled by self.eval_acc
        id_pred = np.zeros_like(id_conf)
        id_gt = np.zeros_like(id_conf)

        if fsood:
            # load csid data and compute confidence
            for dataset_name, csid_dl in ood_data_loaders['csid'].items():
                print(f'Performing inference on {dataset_name} dataset...',
                      flush=True)
                csid_conf = iterate_data(csid_dl, net, self.group_slices)
                # dummy pred and gt
                # the accuracy will be handled by self.eval_acc
                csid_pred = np.zeros_like(csid_conf)
                csid_gt = np.zeros_like(csid_conf)
                if self.config.recorder.save_scores:
                    self._save_scores(csid_pred, csid_conf, csid_gt,
                                      dataset_name)
                id_pred = np.concatenate([id_pred, csid_pred])
                id_conf = np.concatenate([id_conf, csid_conf])
                id_gt = np.concatenate([id_gt, csid_gt])

        # load nearood data and compute ood metrics
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       ood_split='nearood')
        # load farood data and compute ood metrics
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       ood_split='farood')
    def _eval_ood(self,
                  net: nn.Module,
                  id_list: List[np.ndarray],
                  ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                  ood_split: str = 'nearood'):
        """Evaluate every dataset in one OOD split against fixed ID scores.

        The accuracy column of each metric row is overwritten with the
        cached self.acc, since the ID pred/gt arrays are dummies.
        """
        print(f'Processing {ood_split}...', flush=True)
        [id_pred, id_conf, id_gt] = id_list
        metrics_list = []
        for dataset_name, ood_dl in ood_data_loaders[ood_split].items():
            print(f'Performing inference on {dataset_name} dataset...',
                  flush=True)
            ood_conf = iterate_data(ood_dl, net, self.group_slices)
            ood_gt = -1 * np.ones_like(ood_conf)  # hard set to -1 as ood
            # dummy pred
            ood_pred = np.zeros_like(ood_conf)
            if self.config.recorder.save_scores:
                self._save_scores(ood_pred, ood_conf, ood_gt, dataset_name)

            pred = np.concatenate([id_pred, ood_pred])
            conf = np.concatenate([id_conf, ood_conf])
            label = np.concatenate([id_gt, ood_gt])
            print(f'Computing metrics on {dataset_name} dataset...')
            ood_metrics = compute_all_metrics(conf, label, pred)
            # the acc here is not reliable
            # since we use dummy pred and gt for id samples
            # so we use the acc computed by self.eval_acc
            ood_metrics[-1] = self.acc
            if self.config.recorder.save_csv:
                self._save_csv(ood_metrics, dataset_name=dataset_name)
            metrics_list.append(ood_metrics)
        print('Computing mean metrics...', flush=True)
        metrics_list = np.array(metrics_list)
        metrics_mean = np.mean(metrics_list, axis=0)
        if self.config.recorder.save_csv:
            self._save_csv(metrics_mean, dataset_name=ood_split)
def _save_csv(self, metrics, dataset_name):
[fpr, auroc, aupr_in, aupr_out,
ccr_4, ccr_3, ccr_2, ccr_1, accuracy] \
= metrics
write_content = {
'dataset': dataset_name,
'FPR@95': '{:.2f}'.format(100 * fpr),
'AUROC': '{:.2f}'.format(100 * auroc),
'AUPR_IN': '{:.2f}'.format(100 * aupr_in),
'AUPR_OUT': '{:.2f}'.format(100 * aupr_out),
'CCR_4': '{:.2f}'.format(100 * ccr_4),
'CCR_3': '{:.2f}'.format(100 * ccr_3),
'CCR_2': '{:.2f}'.format(100 * ccr_2),
'CCR_1': '{:.2f}'.format(100 * ccr_1),
'ACC': '{:.2f}'.format(100 * accuracy)
}
fieldnames = list(write_content.keys())
# print ood metric results
print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc),
end=' ',
flush=True)
print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format(
100 * aupr_in, 100 * aupr_out),
flush=True)
print('CCR: {:.2f}, {:.2f}, {:.2f}, {:.2f},'.format(
ccr_4 * 100, ccr_3 * 100, ccr_2 * 100, ccr_1 * 100),
end=' ',
flush=True)
print('ACC: {:.2f}'.format(accuracy * 100), flush=True)
print(u'\u2500' * 70, flush=True)
csv_path = os.path.join(self.config.output_dir, 'ood.csv')
if not os.path.exists(csv_path):
with open(csv_path, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow(write_content)
else:
with open(csv_path, 'a', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow(write_content)
def _save_scores(self, pred, conf, gt, save_name):
save_dir = os.path.join(self.config.output_dir, 'scores')
os.makedirs(save_dir, exist_ok=True)
np.savez(os.path.join(save_dir, save_name),
pred=pred,
conf=conf,
label=gt)
    def eval_acc(self,
                 net: nn.Module,
                 data_loader: DataLoader,
                 postprocessor: BasePostprocessor = None,
                 epoch_idx: int = -1,
                 num_groups: int = None,
                 group_slices: torch.Tensor = None,
                 fsood: bool = False,
                 csid_data_loaders: DataLoader = None):
        """Compute group-softmax accuracy (and loss) on the ID test data.

        Caches the accuracy in ``self.acc`` so that ``_eval_ood`` can
        substitute it for the unreliable dummy-based accuracy.

        Args:
            net: MOS network to evaluate (switched to eval mode here).
            data_loader: ID test loader.
            postprocessor: unused; kept for a uniform evaluator interface.
            epoch_idx: echoed back in the returned metrics dict.
            num_groups / group_slices: precomputed group layout; when either
                is missing the layout is derived from the loader's labels.
            fsood: if True, also fold csid (covariate-shifted ID) loaders
                into the accuracy estimate.
            csid_data_loaders: dict of csid loaders, required when ``fsood``.

        Returns:
            dict with keys ``acc``, ``epoch_idx`` and ``loss``.
        """
        net.eval()
        if num_groups is None or group_slices is None:
            self.cal_group_slices(data_loader)
        else:
            self.num_groups = num_groups
            self.group_slices = group_slices.cuda()
        # NOTE(review): run_eval_acc appears to return per-sample loss and
        # top-1 indicator lists — confirm against its definition above.
        loss, top1 = run_eval_acc(net, data_loader, self.group_slices,
                                  self.num_groups)
        if fsood:
            assert csid_data_loaders is not None
            for dataset_name, csid_dl in csid_data_loaders.items():
                _, temp = run_eval_acc(net, csid_dl, self.group_slices,
                                       self.num_groups)
                top1.extend(temp)
        metrics = {}
        metrics['acc'] = np.mean(top1)
        metrics['epoch_idx'] = epoch_idx
        metrics['loss'] = np.mean(loss)
        self.acc = metrics['acc']
        return metrics
    def report(self, test_metrics):
        """Final pipeline hook; all metrics were already printed/saved."""
        print('Completed!', flush=True)
| 14,464 | 36.280928 | 79 | py |
null | OpenOOD-main/openood/evaluators/ood_evaluator.py | import csv
import os
from typing import Dict, List
import numpy as np
import torch.nn as nn
from torch.utils.data import DataLoader
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
from .base_evaluator import BaseEvaluator
from .metrics import compute_all_metrics
class OODEvaluator(BaseEvaluator):
    """Evaluator for OOD detection benchmarks.

    Runs postprocessor inference on the ID test split and on every dataset
    of the ``nearood``/``farood`` splits, then reports FPR@95, AUROC, AUPR
    and CCR metrics per dataset plus split means.
    """
    def __init__(self, config: Config):
        """OOD Evaluator.

        Args:
            config (Config): experiment config; ``config.recorder`` controls
                score/CSV saving, ``config.postprocessor.APS_mode`` enables
                automatic hyperparameter search on validation splits.
        """
        super(OODEvaluator, self).__init__(config)
        # cached ID inference results, filled by eval_acc
        self.id_pred = None
        self.id_conf = None
        self.id_gt = None
    def eval_ood(self,
                 net: nn.Module,
                 id_data_loaders: Dict[str, DataLoader],
                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                 postprocessor: BasePostprocessor,
                 fsood: bool = False):
        """Full OOD evaluation over the near- and far-OOD splits.

        When ``fsood`` is True, covariate-shifted ID (csid) datasets are
        appended to the ID pool before the OOD metrics are computed.
        """
        # `net` may be a dict of sub-networks (e.g. backbone plus heads)
        if type(net) is dict:
            for subnet in net.values():
                subnet.eval()
        else:
            net.eval()
        assert 'test' in id_data_loaders, \
            'id_data_loaders should have the key: test!'
        dataset_name = self.config.dataset.name
        if self.config.postprocessor.APS_mode:
            # tune postprocessor hyperparameters on the validation splits
            assert 'val' in id_data_loaders
            assert 'val' in ood_data_loaders
            self.hyperparam_search(net, id_data_loaders['val'],
                                   ood_data_loaders['val'], postprocessor)
        print(f'Performing inference on {dataset_name} dataset...', flush=True)
        id_pred, id_conf, id_gt = postprocessor.inference(
            net, id_data_loaders['test'])
        if self.config.recorder.save_scores:
            self._save_scores(id_pred, id_conf, id_gt, dataset_name)
        if fsood:
            # load csid data and treat it as additional in-distribution data
            for dataset_name, csid_dl in ood_data_loaders['csid'].items():
                print(f'Performing inference on {dataset_name} dataset...',
                      flush=True)
                csid_pred, csid_conf, csid_gt = postprocessor.inference(
                    net, csid_dl)
                if self.config.recorder.save_scores:
                    self._save_scores(csid_pred, csid_conf, csid_gt,
                                      dataset_name)
                id_pred = np.concatenate([id_pred, csid_pred])
                id_conf = np.concatenate([id_conf, csid_conf])
                id_gt = np.concatenate([id_gt, csid_gt])
        # load nearood data and compute ood metrics
        print(u'\u2500' * 70, flush=True)
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='nearood')
        # load farood data and compute ood metrics
        print(u'\u2500' * 70, flush=True)
        self._eval_ood(net, [id_pred, id_conf, id_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='farood')
    def _eval_ood(self,
                  net: nn.Module,
                  id_list: List[np.ndarray],
                  ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                  postprocessor: BasePostprocessor,
                  ood_split: str = 'nearood'):
        """Evaluate one OOD split dataset-by-dataset and report the mean.

        ``id_list`` is ``[id_pred, id_conf, id_gt]`` from the ID pass; OOD
        samples are labelled ``-1`` so the metrics code can separate them.
        """
        print(f'Processing {ood_split}...', flush=True)
        [id_pred, id_conf, id_gt] = id_list
        metrics_list = []
        for dataset_name, ood_dl in ood_data_loaders[ood_split].items():
            print(f'Performing inference on {dataset_name} dataset...',
                  flush=True)
            ood_pred, ood_conf, ood_gt = postprocessor.inference(net, ood_dl)
            ood_gt = -1 * np.ones_like(ood_gt)  # hard set to -1 as ood
            if self.config.recorder.save_scores:
                self._save_scores(ood_pred, ood_conf, ood_gt, dataset_name)
            pred = np.concatenate([id_pred, ood_pred])
            conf = np.concatenate([id_conf, ood_conf])
            label = np.concatenate([id_gt, ood_gt])
            print(f'Computing metrics on {dataset_name} dataset...')
            ood_metrics = compute_all_metrics(conf, label, pred)
            if self.config.recorder.save_csv:
                self._save_csv(ood_metrics, dataset_name=dataset_name)
            metrics_list.append(ood_metrics)
        print('Computing mean metrics...', flush=True)
        metrics_list = np.array(metrics_list)
        metrics_mean = np.mean(metrics_list, axis=0)
        if self.config.recorder.save_csv:
            self._save_csv(metrics_mean, dataset_name=ood_split)
    def eval_ood_val(self, net: nn.Module, id_data_loaders: Dict[str,
                                                                 DataLoader],
                     ood_data_loaders: Dict[str, DataLoader],
                     postprocessor: BasePostprocessor):
        """Validation-time OOD check; returns ``{'auroc': <percent>}``.

        With APS enabled this also performs the hyperparameter search and
        returns its best AUROC.
        """
        if type(net) is dict:
            for subnet in net.values():
                subnet.eval()
        else:
            net.eval()
        assert 'val' in id_data_loaders
        assert 'val' in ood_data_loaders
        if self.config.postprocessor.APS_mode:
            val_auroc = self.hyperparam_search(net, id_data_loaders['val'],
                                               ood_data_loaders['val'],
                                               postprocessor)
        else:
            id_pred, id_conf, id_gt = postprocessor.inference(
                net, id_data_loaders['val'])
            ood_pred, ood_conf, ood_gt = postprocessor.inference(
                net, ood_data_loaders['val'])
            ood_gt = -1 * np.ones_like(ood_gt)  # hard set to -1 as ood
            pred = np.concatenate([id_pred, ood_pred])
            conf = np.concatenate([id_conf, ood_conf])
            label = np.concatenate([id_gt, ood_gt])
            ood_metrics = compute_all_metrics(conf, label, pred)
            # index 1 of the metrics vector is AUROC
            val_auroc = ood_metrics[1]
        return {'auroc': 100 * val_auroc}
    def _save_csv(self, metrics, dataset_name):
        """Print one row of OOD metrics and append it to ``ood.csv``."""
        [fpr, auroc, aupr_in, aupr_out,
         ccr_4, ccr_3, ccr_2, ccr_1, accuracy] \
            = metrics
        write_content = {
            'dataset': dataset_name,
            'FPR@95': '{:.2f}'.format(100 * fpr),
            'AUROC': '{:.2f}'.format(100 * auroc),
            'AUPR_IN': '{:.2f}'.format(100 * aupr_in),
            'AUPR_OUT': '{:.2f}'.format(100 * aupr_out),
            'CCR_4': '{:.2f}'.format(100 * ccr_4),
            'CCR_3': '{:.2f}'.format(100 * ccr_3),
            'CCR_2': '{:.2f}'.format(100 * ccr_2),
            'CCR_1': '{:.2f}'.format(100 * ccr_1),
            'ACC': '{:.2f}'.format(100 * accuracy)
        }
        fieldnames = list(write_content.keys())
        # print ood metric results
        print('FPR@95: {:.2f}, AUROC: {:.2f}'.format(100 * fpr, 100 * auroc),
              end=' ',
              flush=True)
        print('AUPR_IN: {:.2f}, AUPR_OUT: {:.2f}'.format(
            100 * aupr_in, 100 * aupr_out),
              flush=True)
        print('CCR: {:.2f}, {:.2f}, {:.2f}, {:.2f},'.format(
            ccr_4 * 100, ccr_3 * 100, ccr_2 * 100, ccr_1 * 100),
              end=' ',
              flush=True)
        print('ACC: {:.2f}'.format(accuracy * 100), flush=True)
        print(u'\u2500' * 70, flush=True)
        # write header only when the CSV file does not exist yet
        csv_path = os.path.join(self.config.output_dir, 'ood.csv')
        if not os.path.exists(csv_path):
            with open(csv_path, 'w', newline='') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerow(write_content)
        else:
            with open(csv_path, 'a', newline='') as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writerow(write_content)
    def _save_scores(self, pred, conf, gt, save_name):
        """Persist predictions/confidences/labels as an ``.npz`` file."""
        save_dir = os.path.join(self.config.output_dir, 'scores')
        os.makedirs(save_dir, exist_ok=True)
        np.savez(os.path.join(save_dir, save_name),
                 pred=pred,
                 conf=conf,
                 label=gt)
    def eval_acc(self,
                 net: nn.Module,
                 data_loader: DataLoader,
                 postprocessor: BasePostprocessor = None,
                 epoch_idx: int = -1,
                 fsood: bool = False,
                 csid_data_loaders: DataLoader = None):
        """Returns the accuracy score of the labels and predictions.

        Caches the ID predictions/confidences/labels on the instance for
        later reuse. With ``fsood`` the csid loaders are folded in as
        additional ID data.

        :return: dict with keys ``acc`` (float in [0, 1]) and ``epoch_idx``.
        """
        if type(net) is dict:
            net['backbone'].eval()
        else:
            net.eval()
        self.id_pred, self.id_conf, self.id_gt = postprocessor.inference(
            net, data_loader)
        if fsood:
            assert csid_data_loaders is not None
            for dataset_name, csid_dl in csid_data_loaders.items():
                csid_pred, csid_conf, csid_gt = postprocessor.inference(
                    net, csid_dl)
                self.id_pred = np.concatenate([self.id_pred, csid_pred])
                self.id_conf = np.concatenate([self.id_conf, csid_conf])
                self.id_gt = np.concatenate([self.id_gt, csid_gt])
        metrics = {}
        metrics['acc'] = sum(self.id_pred == self.id_gt) / len(self.id_pred)
        metrics['epoch_idx'] = epoch_idx
        return metrics
    def report(self, test_metrics):
        """Final pipeline hook; all metrics were already printed/saved."""
        print('Completed!', flush=True)
    def hyperparam_search(
        self,
        net: nn.Module,
        id_data_loader,
        ood_data_loader,
        postprocessor: BasePostprocessor,
    ):
        """Grid-search the postprocessor hyperparameters on val splits.

        Tries every combination from ``postprocessor.args_dict``, keeps the
        combination with the best validation AUROC set on the postprocessor,
        and returns that best AUROC.
        """
        print('Starting automatic parameter search...')
        aps_dict = {}
        max_auroc = 0
        hyperparam_names = []
        hyperparam_list = []
        count = 0
        for name in postprocessor.args_dict.keys():
            hyperparam_names.append(name)
            count += 1
        for name in hyperparam_names:
            hyperparam_list.append(postprocessor.args_dict[name])
        # cartesian product of all per-parameter value lists
        hyperparam_combination = self.recursive_generator(
            hyperparam_list, count)
        for hyperparam in hyperparam_combination:
            postprocessor.set_hyperparam(hyperparam)
            id_pred, id_conf, id_gt = postprocessor.inference(
                net, id_data_loader)
            ood_pred, ood_conf, ood_gt = postprocessor.inference(
                net, ood_data_loader)
            ood_gt = -1 * np.ones_like(ood_gt)  # hard set to -1 as ood
            pred = np.concatenate([id_pred, ood_pred])
            conf = np.concatenate([id_conf, ood_conf])
            label = np.concatenate([id_gt, ood_gt])
            ood_metrics = compute_all_metrics(conf, label, pred)
            # NOTE(review): .index() assumes combinations are unique; with
            # duplicated value lists it would map results to the first copy.
            index = hyperparam_combination.index(hyperparam)
            aps_dict[index] = ood_metrics[1]
            print('Hyperparam:{}, auroc:{}'.format(hyperparam,
                                                   aps_dict[index]))
            if ood_metrics[1] > max_auroc:
                max_auroc = ood_metrics[1]
        for key in aps_dict.keys():
            if aps_dict[key] == max_auroc:
                postprocessor.set_hyperparam(hyperparam_combination[key])
        print('Final hyperparam: {}'.format(postprocessor.get_hyperparam()))
        return max_auroc
    def recursive_generator(self, list, n):
        """Return the cartesian product of the first ``n`` value lists.

        NOTE(review): the parameter name shadows the ``list`` builtin;
        equivalent to ``itertools.product`` with a different tuple order.
        """
        if n == 1:
            results = []
            for x in list[0]:
                k = []
                k.append(x)
                results.append(k)
            return results
        else:
            results = []
            temp = self.recursive_generator(list, n - 1)
            for x in list[n - 1]:
                for y in temp:
                    k = y.copy()
                    k.append(x)
                    results.append(k)
            return results
| 11,738 | 39.064846 | 79 | py |
null | OpenOOD-main/openood/evaluators/osr_evaluator.py | from typing import Dict
import torch.nn as nn
from torch.utils.data import DataLoader
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
from .ood_evaluator import OODEvaluator
class OSREvaluator(OODEvaluator):
    """Open-set recognition evaluator.

    Identical to :class:`OODEvaluator` except that the unknown classes come
    from a single ``osr`` split instead of near/far OOD splits.
    """
    def __init__(self, config: Config):
        super(OSREvaluator, self).__init__(config)

    def eval_ood(self, net: nn.Module, id_data_loader: DataLoader,
                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                 postprocessor: BasePostprocessor):
        """Run inference on the closed-set test split, then score the
        open-set ('osr') datasets against it."""
        # put every sub-network (or the single network) into eval mode
        subnets = net.values() if type(net) is dict else [net]
        for subnet in subnets:
            subnet.eval()
        assert 'test' in id_data_loader, \
            'id_data_loaders should have the key: test!'
        dataset_name = self.config.dataset.name
        print(f'Performing inference on {dataset_name} dataset...', flush=True)
        known_pred, known_conf, known_gt = postprocessor.inference(
            net, id_data_loader['test'])
        if self.config.recorder.save_scores:
            self._save_scores(known_pred, known_conf, known_gt, dataset_name)
        # unknown-class data lives under the single 'osr' split
        self._eval_ood(net, [known_pred, known_conf, known_gt],
                       ood_data_loaders,
                       postprocessor,
                       ood_split='osr')
| 1,383 | 33.6 | 79 | py |
null | OpenOOD-main/openood/evaluators/patchcore_evaluator.py | import os
from typing import Dict
import cv2
import numpy as np
import torch
import torch.nn as nn
from PIL import Image
from scipy.ndimage import gaussian_filter
from sklearn.metrics import roc_auc_score
from torch.utils.data import DataLoader
from torchvision import transforms
from openood.postprocessors import BasePostprocessor
from openood.utils import Config
from .base_evaluator import BaseEvaluator
class PatchCoreEvaluator(BaseEvaluator):
    """Evaluator for PatchCore-style anomaly detection.

    Computes the image-level ROC-AUC from patch-wise anomaly scores and,
    when pixel-level ground-truth masks are available, the pixel-level
    ROC-AUC as well.
    """
    def __init__(self, config: Config):
        super(PatchCoreEvaluator, self).__init__(config)
        self.config = config

    def eval_ood(self, net: nn.Module, id_data_loader: DataLoader,
                 ood_data_loaders: Dict[str, Dict[str, DataLoader]],
                 postprocessor: BasePostprocessor):
        """Score anomalous ('val') vs. good ('test') samples and report AUC.

        Side effects: stores the per-pixel ground truth and the image/pixel
        level predictions on the instance and prints the AUC scores.
        """
        net.eval()
        dataset_name = self.config.dataset.name
        print(f'Performing inference on {dataset_name} dataset...', flush=True)
        # 'val' holds defective (not good) samples, 'test' the good ones
        id_pred, id_conf, id_gt = postprocessor.inference(
            net, ood_data_loaders['val'])  # not good
        good_pred, good_conf, good_gt = postprocessor.inference(
            net, id_data_loader['test'])  # good
        conf = np.concatenate([id_conf, good_conf])
        gt = np.concatenate([id_gt, good_gt])
        self.gt_transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.CenterCrop(224)
        ])
        mean_train = [0.485, 0.456, 0.406]
        std_train = [0.229, 0.224, 0.225]
        # NOTE(review): Image.ANTIALIAS is removed in Pillow >= 10;
        # Image.LANCZOS is the drop-in replacement when upgrading.
        self.transform = transforms.Compose([
            transforms.Resize((256, 256), Image.ANTIALIAS),
            transforms.ToTensor(),
            transforms.CenterCrop(224),
            transforms.Normalize(mean=mean_train, std=std_train)
        ])
        # collect per-pixel ground-truth masks from the 'trainGT' split
        # (removed unused `count`, `data` and `label.cuda()` locals)
        self.gt_list_px_lvl = []
        for batch in id_data_loader['trainGT']:
            for image_name in batch['image_name']:
                path = os.path.join('./data/images/', image_name)
                gt_img = Image.open(path)
                gt_img = self.gt_transform(gt_img)
                gt_img = torch.unsqueeze(gt_img, 0)
                gt_np = gt_img.cpu().numpy()[0, 0].astype(int)
                self.gt_list_px_lvl.extend(gt_np.ravel())
        self.pred_list_px_lvl = []
        self.pred_list_img_lvl = []
        for patchscore in conf:
            anomaly_map = patchscore[:, 0].reshape((28, 28))
            # re-weight the max patch score by how concentrated its
            # neighbour distances are (PatchCore image-level scoring)
            N_b = patchscore[np.argmax(patchscore[:, 0])]
            w = (1 - (np.max(np.exp(N_b)) / np.sum(np.exp(N_b))))
            score = w * max(patchscore[:, 0])  # Image-level score
            anomaly_map_resized = cv2.resize(anomaly_map, (224, 224))
            anomaly_map_resized_blur = gaussian_filter(anomaly_map_resized,
                                                       sigma=4)
            self.pred_list_px_lvl.extend(anomaly_map_resized_blur.ravel())
            self.pred_list_img_lvl.append(score)
        print('Total image-level auc-roc score :')
        img_auc = roc_auc_score(gt, self.pred_list_img_lvl)
        print(img_auc)
        # BUGFIX: `test_pix` was an undefined name here (NameError at
        # runtime); only evaluate pixel-level AUC when masks were loaded.
        test_pix = len(self.gt_list_px_lvl) > 0
        if test_pix:
            print('Total pixel-level auc-roc score :')
            pixel_auc = roc_auc_score(self.gt_list_px_lvl,
                                      self.pred_list_px_lvl)
            print(pixel_auc)

    def eval_acc(self,
                 net: nn.Module,
                 data_loader: DataLoader,
                 postprocessor: BasePostprocessor = None,
                 epoch_idx: int = -1):
        """Return top-1 accuracy of the postprocessor predictions."""
        net.eval()
        id_pred, _, id_gt = postprocessor.inference(net, data_loader)
        metrics = {}
        metrics['acc'] = sum(id_pred == id_gt) / len(id_pred)
        metrics['epoch_idx'] = epoch_idx
        return metrics

    def report(self, test_metrics):
        """Final pipeline hook; all metrics were already printed."""
        print('Completed!', flush=True)
| 4,030 | 34.991071 | 79 | py |
null | OpenOOD-main/openood/evaluators/utils.py | from openood.evaluators.mos_evaluator import MOSEvaluator
from openood.utils import Config
from .ad_evaluator import ADEvaluator
from .arpl_evaluator import ARPLEvaluator
from .base_evaluator import BaseEvaluator
from .ece_evaluator import ECEEvaluator
from .fsood_evaluator import FSOODEvaluator
from .ood_evaluator import OODEvaluator
from .osr_evaluator import OSREvaluator
from .patchcore_evaluator import PatchCoreEvaluator
def get_evaluator(config: Config):
    """Instantiate the evaluator selected by ``config.evaluator.name``.

    Raises ``KeyError`` for an unknown evaluator name.
    """
    evaluator_catalog = {
        'base': BaseEvaluator,
        'ood': OODEvaluator,
        'fsood': FSOODEvaluator,
        'patch': PatchCoreEvaluator,
        'arpl': ARPLEvaluator,
        'ad': ADEvaluator,
        'mos': MOSEvaluator,
        'ece': ECEEvaluator,
        'osr': OSREvaluator,
    }
    evaluator_cls = evaluator_catalog[config.evaluator.name]
    return evaluator_cls(config)
| 819 | 29.37037 | 57 | py |
null | OpenOOD-main/openood/losses/__init__.py | from .draem_loss import get_draem_losses
from .reweight import rew_ce, rew_sce
from .sce import soft_cross_entropy
| 115 | 28 | 40 | py |
null | OpenOOD-main/openood/losses/draem_loss.py | import torch
from .focal import FocalLoss
from .ssim import SSIM
def get_draem_losses():
    """Return the three losses used to train DRAEM, keyed by name:
    pixel-wise L2, structural similarity (SSIM) and focal loss."""
    return {
        'l2': torch.nn.modules.loss.MSELoss(),
        'ssim': SSIM(),
        'focal': FocalLoss(),
    }
| 231 | 15.571429 | 46 | py |
null | OpenOOD-main/openood/losses/focal.py | import numpy as np
import torch
import torch.nn as nn
class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., "Focal Loss for Dense Object
    Detection") with optional label smoothing and per-class alpha weights.

    Args:
        apply_nonlin: optional callable applied to the logits first
            (e.g. softmax) so that inputs are probabilities.
        alpha: per-class weights — None (uniform), a list/array of length
            ``num_class``, or a float used for the ``balance_index`` class.
        gamma: focusing parameter; larger values down-weight easy examples.
        balance_index: class index that receives ``alpha`` when alpha is a
            float.
        smooth: label-smoothing factor in [0, 1].
        size_average: if True, return the mean loss, else per-sample losses.
    """
    def __init__(self,
                 apply_nonlin=None,
                 alpha=None,
                 gamma=2,
                 balance_index=0,
                 smooth=1e-5,
                 size_average=True):
        super(FocalLoss, self).__init__()
        self.apply_nonlin = apply_nonlin
        self.alpha = alpha
        self.gamma = gamma
        self.balance_index = balance_index
        self.smooth = smooth
        self.size_average = size_average
        if self.smooth is not None:
            if self.smooth < 0 or self.smooth > 1.0:
                raise ValueError('smooth value should be in [0,1]')
    def forward(self, logit, target):
        """Compute the focal loss of ``logit`` against integer ``target``
        class indices (extra spatial dims are flattened first)."""
        if self.apply_nonlin is not None:
            logit = self.apply_nonlin(logit)
        num_class = logit.shape[1]
        if logit.dim() > 2:
            # N,C,d1,d2 -> N,C,m (m=d1*d2*...)
            logit = logit.view(logit.size(0), logit.size(1), -1)
            logit = logit.permute(0, 2, 1).contiguous()
            logit = logit.view(-1, logit.size(-1))
        target = torch.squeeze(target, 1)
        target = target.view(-1, 1)
        # normalise alpha into a (num_class, 1) weight column
        alpha = self.alpha
        if alpha is None:
            alpha = torch.ones(num_class, 1)
        elif isinstance(alpha, (list, np.ndarray)):
            assert len(alpha) == num_class
            alpha = torch.FloatTensor(alpha).view(num_class, 1)
            alpha = alpha / alpha.sum()
        elif isinstance(alpha, float):
            alpha = torch.ones(num_class, 1)
            alpha = alpha * (1 - self.alpha)
            alpha[self.balance_index] = self.alpha
        else:
            raise TypeError('Not support alpha type')
        if alpha.device != logit.device:
            alpha = alpha.to(logit.device)
        # one-hot encode the targets (built on CPU, then moved if needed)
        idx = target.cpu().long()
        one_hot_key = torch.FloatTensor(target.size(0), num_class).zero_()
        one_hot_key = one_hot_key.scatter_(1, idx, 1)
        if one_hot_key.device != logit.device:
            one_hot_key = one_hot_key.to(logit.device)
        if self.smooth:
            # label smoothing keeps log() away from 0 as well
            one_hot_key = torch.clamp(one_hot_key,
                                      self.smooth / (num_class - 1),
                                      1.0 - self.smooth)
        # pt = predicted probability of the true class (logit is assumed to
        # hold probabilities once apply_nonlin has run)
        pt = (one_hot_key * logit).sum(1) + self.smooth
        logpt = pt.log()
        gamma = self.gamma
        alpha = alpha[idx]
        alpha = torch.squeeze(alpha)
        loss = -1 * alpha * torch.pow((1 - pt), gamma) * logpt
        if self.size_average:
            loss = loss.mean()
        return loss
null | OpenOOD-main/openood/losses/kdad_losses.py | import torch
from torch import nn
class MseDirectionLoss(nn.Module):
"""Define MSE + Direction loss."""
def __init__(self, lamda):
super(MseDirectionLoss, self).__init__()
self.lamda = lamda
self.criterion = nn.MSELoss()
self.similarity_loss = torch.nn.CosineSimilarity()
def forward(self, output_pred, output_real):
y_pred_0, y_pred_1, y_pred_2, y_pred_3 = output_pred[3], output_pred[
6], output_pred[9], output_pred[12]
y_0, y_1, y_2, y_3 = output_real[3], output_real[6], output_real[
9], output_real[12]
# different terms of loss
abs_loss_0 = self.criterion(y_pred_0, y_0)
loss_0 = torch.mean(1 - self.similarity_loss(
y_pred_0.view(y_pred_0.shape[0], -1), y_0.view(y_0.shape[0], -1)))
abs_loss_1 = self.criterion(y_pred_1, y_1)
loss_1 = torch.mean(1 - self.similarity_loss(
y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1)))
abs_loss_2 = self.criterion(y_pred_2, y_2)
loss_2 = torch.mean(1 - self.similarity_loss(
y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1)))
abs_loss_3 = self.criterion(y_pred_3, y_3)
loss_3 = torch.mean(1 - self.similarity_loss(
y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1)))
total_loss = loss_0 + loss_1 + loss_2 + loss_3 + self.lamda * (
abs_loss_0 + abs_loss_1 + abs_loss_2 + abs_loss_3)
return total_loss
class DirectionOnlyLoss(nn.Module):
"""Define Direction loss."""
def __init__(self):
super(DirectionOnlyLoss, self).__init__()
self.similarity_loss = torch.nn.CosineSimilarity()
def forward(self, output_pred, output_real):
y_pred_0, y_pred_1, y_pred_2, y_pred_3 = output_pred[3], output_pred[
6], output_pred[9], output_pred[12]
y_0, y_1, y_2, y_3 = output_real[3], output_real[6], output_real[
9], output_real[12]
# different terms of loss
loss_0 = torch.mean(1 - self.similarity_loss(
y_pred_0.view(y_pred_0.shape[0], -1), y_0.view(y_0.shape[0], -1)))
loss_1 = torch.mean(1 - self.similarity_loss(
y_pred_1.view(y_pred_1.shape[0], -1), y_1.view(y_1.shape[0], -1)))
loss_2 = torch.mean(1 - self.similarity_loss(
y_pred_2.view(y_pred_2.shape[0], -1), y_2.view(y_2.shape[0], -1)))
loss_3 = torch.mean(1 - self.similarity_loss(
y_pred_3.view(y_pred_3.shape[0], -1), y_3.view(y_3.shape[0], -1)))
total_loss = loss_0 + loss_1 + loss_2 + loss_3
return total_loss
| 2,666 | 40.671875 | 78 | py |
null | OpenOOD-main/openood/losses/rd4ad_loss.py | import torch
from torch import nn
def loss_function(a, b):
cos_loss = torch.nn.CosineSimilarity()
loss = 0
for item in range(len(a)):
loss += torch.mean(1-cos_loss(a[item].view(a[item].shape[0],-1),
b[item].view(b[item].shape[0],-1)))
return loss | 314 | 27.636364 | 73 | py |
null | OpenOOD-main/openood/losses/reweight.py | import torch
import torch.nn.functional as F
from .sce import soft_cross_entropy
def rew_ce(logits, labels, sample_weights):
losses = F.cross_entropy(logits, labels, reduction='none')
return (losses * sample_weights.type_as(losses)).mean()
def rew_sce(logits, soft_labels, sample_weights):
losses = soft_cross_entropy(logits, soft_labels, reduce=False)
return torch.mean(losses * sample_weights.type_as(losses))
| 433 | 27.933333 | 66 | py |
null | OpenOOD-main/openood/losses/sce.py | import torch
class SoftCrossEntropyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, logit, label, weight=None):
assert logit.size() == label.size(), 'logit.size() != label.size()'
dim = logit.dim()
max_logit = logit.max(dim - 1, keepdim=True)[0]
logit = logit - max_logit
exp_logit = logit.exp()
exp_sum = exp_logit.sum(dim - 1, keepdim=True)
prob = exp_logit / exp_sum
log_exp_sum = exp_sum.log()
neg_log_prob = log_exp_sum - logit
if weight is None:
weighted_label = label
else:
if weight.size() != (logit.size(-1), ):
raise ValueError(
'since logit.size() = {}, '\
'weight.size() should be ({},), but got {}'
.format(
logit.size(),
logit.size(-1),
weight.size(),
))
size = [1] * label.dim()
size[-1] = label.size(-1)
weighted_label = label * weight.view(size)
ctx.save_for_backward(weighted_label, prob)
out = (neg_log_prob * weighted_label).sum(dim - 1)
return out
@staticmethod
def backward(ctx, grad_output):
weighted_label, prob = ctx.saved_tensors
old_size = weighted_label.size()
# num_classes
K = old_size[-1]
# batch_size
B = weighted_label.numel() // K
grad_output = grad_output.view(B, 1)
weighted_label = weighted_label.view(B, K)
prob = prob.view(B, K)
grad_input = grad_output * (prob * weighted_label.sum(1, True) -
weighted_label)
grad_input = grad_input.view(old_size)
return grad_input, None, None
def soft_cross_entropy(logit,
label,
weight=None,
reduce=None,
reduction='mean'):
if weight is not None and weight.requires_grad:
raise RuntimeError('gradient for weight is not supported')
losses = SoftCrossEntropyFunction.apply(logit, label, weight)
reduction = {
True: 'mean',
False: 'none',
None: reduction,
}[reduce]
if reduction == 'mean':
return losses.mean()
elif reduction == 'sum':
return losses.sum()
elif reduction == 'none':
return losses
else:
raise ValueError('invalid value for reduction: {}'.format(reduction))
class SoftCrossEntropyLoss(torch.nn.Module):
def __init__(self, weight=None, reduce=None, reduction='mean'):
super(SoftCrossEntropyLoss, self).__init__()
self.weight = weight
self.reduce = reduce
self.reduction = reduction
def forward(self, logit, label, weight=None):
if weight is None:
weight = self.weight
return soft_cross_entropy(logit, label, weight, self.reduce,
self.reduction)
| 3,036 | 33.123596 | 77 | py |
null | OpenOOD-main/openood/losses/ssim.py | from math import exp
import torch
import torch.nn.functional as F
def gaussian(window_size, sigma):
gauss = torch.Tensor([
exp(-(x - window_size // 2)**2 / float(2 * sigma**2))
for x in range(window_size)
])
return gauss / gauss.sum()
def create_window(window_size, channel=1):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(
_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size,
window_size).contiguous()
return window
def ssim(img1,
img2,
window_size=11,
window=None,
size_average=True,
full=False,
val_range=None):
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
val_range = max_val - min_val
# else:
# l = val_range
padd = window_size // 2
(_, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=padd,
groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=padd,
groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=padd,
groups=channel) - mu1_mu2
c1 = (0.01 * val_range)**2
c2 = (0.03 * val_range)**2
v1 = 2.0 * sigma12 + c2
v2 = sigma1_sq + sigma2_sq + c2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + c1) * v1) / ((mu1_sq + mu2_sq + c1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret, ssim_map
class SSIM(torch.nn.Module):
# For DRAEM
def __init__(self, window_size=11, size_average=True, val_range=None):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.val_range = val_range
# Assume 1 channel for SSIM
self.channel = 1
self.window = create_window(window_size).cuda()
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.dtype == img1.dtype:
window = self.window
else:
window = create_window(self.window_size,
channel).to(img1.device).type(img1.dtype)
self.window = window
self.channel = channel
s_score, ssim_map = ssim(img1,
img2,
window=window,
window_size=self.window_size,
size_average=self.size_average)
return 1.0 - s_score
| 3,290 | 28.123894 | 76 | py |