Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | OpenOOD-main/scripts/ood/rotpred/imagenet_test_rotpred.sh | #!/bin/bash
# sh scripts/ood/rotpred/imagenet_test_rotpred.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_rot_net_rotpred_e30_lr0.001_default/s0/best.ckpt \
--arch resnet50 \
--postprocessor rotpred \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_rot_net_rotpred_e30_lr0.001_default/s0/best.ckpt \
--arch resnet50 \
--postprocessor rotpred \
--save-score --save-csv --fsood
| 701 | 27.08 | 83 | sh |
null | OpenOOD-main/scripts/ood/rotpred/imagenet_train_rotpred.sh | #!/bin/bash
# sh scripts/ood/rotpred/imagenet_train_rotpred.sh
# batch size is 64 otherwise will run out of GPU memory
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/networks/rot_net.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/base_preprocessor.yml \
--network.backbone.name resnet50 \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \
--trainer.name rotpred \
--optimizer.lr 0.001 \
--optimizer.num_epochs 30 \
--dataset.train.batch_size 64 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0
| 691 | 33.6 | 91 | sh |
null | OpenOOD-main/scripts/ood/sem/cifar100_test_ood_sem.sh | #!/bin/bash
# sh scripts/ood/sem/cifar100_test_ood_sem.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/gmm.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_resnet18_32x32_sae_e100_lr0.05/best.ckpt' \
--mark 0
| 686 | 28.869565 | 82 | sh |
null | OpenOOD-main/scripts/ood/sem/cifar100_train_sem.sh | #!/bin/bash
# sh scripts/ood/sem/cifar100_train_sem.sh
#GPU=1
#CPU=1
#node=79
#jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} \
#-w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/pipelines/train/train_sem.yml \
--optimizer.num_epochs 100 \
--network.pretrained False \
--network.checkpoint ./results/mnist_0408_3/mnist_lenet_base_e100_lr0.1/best_epoch77_acc0.9940.ckpt \
--num_workers 8 | 713 | 30.043478 | 101 | sh |
null | OpenOOD-main/scripts/ood/sem/cifar10_test_ood_sem.sh | #!/bin/bash
# sh scripts/ood/sem/cifar10_test_ood_sem.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/gmm.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt' \
--mark no_train
| 678 | 28.521739 | 73 | sh |
null | OpenOOD-main/scripts/ood/sem/cifar10_train_sem.sh | #!/bin/bash
# sh scripts/ood/sem/cifar10_train_sem.sh
# GPU=1
# CPU=1
# node=79
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_sem.yml \
configs/preprocessors/base_preprocessor.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/cifar10_res18_acc94.30.ckpt'
| 631 | 26.478261 | 70 | sh |
null | OpenOOD-main/scripts/ood/sem/imagenet_test_ood_sem.sh | #!/bin/bash
# sh scripts/ood/sem/imagenet_test_ood_sem.sh
GPU=1
CPU=1
node=76
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/datasets/imagenet/imagenet_ood.yml \
configs/networks/resnet50.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/gmm.yml \
--num_workers 4 \
--ood_dataset.image_size 256 \
--dataset.test.batch_size 256 \
--dataset.val.batch_size 256 \
--network.pretrained True \
--network.checkpoint 'results/checkpoints/imagenet_res50_acc76.10.pth' \
--merge_option merge
| 790 | 28.296296 | 72 | sh |
null | OpenOOD-main/scripts/ood/sem/mnist_test_ood_sem.sh | #!/bin/bash
# sh scripts/ood/sem/mnist_test_ood_sem.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/gmm.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \
--mark no_train
| 657 | 27.608696 | 73 | sh |
null | OpenOOD-main/scripts/ood/sem/sweep_osr.py | # python scripts/ood/sem/sweep_osr.py
import os
config = [
# ['osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml', 'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'],
[
'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt'
],
# ['osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml', 'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'],
# ['osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml', 'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'],
]
for [dataset, ood_dataset, network, pth] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/datasets/{ood_dataset} \
configs/networks/{network}.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/gmm.yml \
--network.pretrained True \
--network.checkpoint {pth} \
--num_workers 8 \
--merge_option merge &")
os.system(command)
| 1,262 | 39.741935 | 136 | py |
null | OpenOOD-main/scripts/ood/she/cifar100_test_ood_she.sh | #!/bin/bash
# sh scripts/ood/she/cifar100_test_ood_she.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/she.yml \
--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt'
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \
--postprocessor she \
--save-score --save-csv
| 1,106 | 31.558824 | 95 | sh |
null | OpenOOD-main/scripts/ood/she/cifar10_test_ood_she.sh | #!/bin/bash
# sh scripts/ood/she/cifar10_test_ood_she.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/she.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
--mark 1
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \
--postprocessor she \
--save-score --save-csv
| 1,135 | 30.555556 | 96 | sh |
null | OpenOOD-main/scripts/ood/she/imagenet200_test_ood_she.sh | #!/bin/bash
# sh scripts/ood/she/imagenet200_test_ood_she.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
--postprocessor she \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
--postprocessor she \
--save-score --save-csv --fsood
| 708 | 28.541667 | 74 | sh |
null | OpenOOD-main/scripts/ood/she/imagenet_test_ood_she.sh | #!/bin/bash
# sh scripts/ood/she/imagenet_test_ood_she.sh
GPU=1
CPU=1
node=63
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/datasets/imagenet/imagenet_ood.yml \
configs/networks/resnet50.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/she.yml \
--num_workers 4 \
--ood_dataset.image_size 256 \
--dataset.test.batch_size 256 \
--dataset.val.batch_size 256 \
--network.pretrained True \
--network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \
--merge_option merge
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50, swin-t, vit-b-16
# ood
python scripts/eval_ood_imagenet.py \
--tvs-pretrained \
--arch resnet50 \
--postprocessor she \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--tvs-pretrained \
--arch resnet50 \
--postprocessor she \
--save-score --save-csv --fsood
| 1,386 | 27.895833 | 82 | sh |
null | OpenOOD-main/scripts/ood/ssd/cifar_10_test_ood_ssd.sh | #!/bin/bash
# sh scripts/ood/ssd/cifar_10_test_ood_ssd.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/simclr.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/mds.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/ssd/last.pth' \
--mark 0 \
--merge_option merge
| 673 | 25.96 | 73 | sh |
null | OpenOOD-main/scripts/ood/udg/cifar100_test_udg.sh | #!/bin/bash
# sh scripts/ood/udg/cifar100_test_udg.sh
GPU=1
CPU=1
node=63
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/udg_net.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_oe_udg_udg_e100_lr0.1_default/s0/best.ckpt' \
--mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_oe_udg_udg_e100_lr0.1_default \
--postprocessor msp \
--save-score --save-csv
| 1,095 | 30.314286 | 88 | sh |
null | OpenOOD-main/scripts/ood/udg/cifar100_train_udg.sh | #!/bin/bash
# sh scripts/ood/udg/cifar100_train_udg.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
-w SG-IDC1-10-51-2-${node} \
python -m pdb -c continue main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_oe.yml \
configs/networks/udg_net.yml \
configs/preprocessors/base_preprocessor.yml \
configs/pipelines/train/baseline.yml \
configs/pipelines/train/train_udg.yml \
--dataset.train.dataset_class UDGDataset \
--dataset.oe.dataset_class UDGDataset \
--network.backbone.name resnet18_32x32 \
--network.pretrained False \
--seed 0
| 782 | 29.115385 | 49 | sh |
null | OpenOOD-main/scripts/ood/udg/cifar10_test_udg.sh | #!/bin/bash
# sh scripts/ood/udg/cifar10_test_udg.sh
GPU=1
CPU=1
node=63
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/udg_net.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_oe_udg_udg_e100_lr0.1_default/s0/best.ckpt' \
--mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_oe_udg_udg_e100_lr0.1_default \
--postprocessor msp \
--save-score --save-csv
| 1,087 | 30.085714 | 87 | sh |
null | OpenOOD-main/scripts/ood/udg/cifar10_train_udg.sh | #!/bin/bash
# sh scripts/ood/udg/cifar10_train_udg.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python -m pdb -c continue main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_oe.yml \
configs/networks/udg_net.yml \
configs/preprocessors/base_preprocessor.yml \
configs/pipelines/train/baseline.yml \
configs/pipelines/train/train_udg.yml \
--dataset.train.dataset_class UDGDataset \
--dataset.oe.dataset_class UDGDataset \
--network.backbone.name resnet18_32x32 \
--network.pretrained False \
--seed 0
| 752 | 29.12 | 51 | sh |
null | OpenOOD-main/scripts/ood/udg/imagenet200_test_udg.sh | #!/bin/bash
# sh scripts/ood/udg/imagenet200_test_udg.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_oe_udg_udg_e90_lr0.1_default \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_oe_udg_udg_e90_lr0.1_default \
--postprocessor msp \
--save-score --save-csv --fsood
| 674 | 27.125 | 62 | sh |
null | OpenOOD-main/scripts/ood/udg/imagenet200_train_udg.sh | #!/bin/bash
# sh scripts/ood/udg/imagenet200_train_udg.sh
# UDG trainer cannot work with multiple GPUs currently
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/datasets/imagenet200/imagenet200_oe.yml \
configs/networks/udg_net.yml \
configs/pipelines/train/baseline.yml \
configs/pipelines/train/train_udg.yml \
configs/preprocessors/base_preprocessor.yml \
--dataset.train.dataset_class UDGDataset \
--dataset.oe.dataset_class UDGDataset \
--network.backbone.name resnet18_224x224 \
--network.pretrained False \
--optimizer.num_epochs 90 \
--dataset.train.batch_size 256 \
--dataset.oe.batch_size 512 \
--num_gpus 1 --num_workers 16 \
--merge_option merge \
--seed 0
| 767 | 33.909091 | 59 | sh |
null | OpenOOD-main/scripts/ood/vim/cifar100_test_ood_vim.sh | #!/bin/bash
# sh scripts/ood/vim/cifar100_test_ood_vim.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/vim.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
--mark 0 \
--postprocessor.postprocessor_args.dim 256
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \
--postprocessor vim \
--save-score --save-csv
| 1,175 | 30.783784 | 97 | sh |
null | OpenOOD-main/scripts/ood/vim/cifar10_test_ood_vim.sh | #!/bin/bash
# sh scripts/ood/vim/cifar10_test_ood_vim.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/vim.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
--mark 0 \
--postprocessor.postprocessor_args.dim 256
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \
--postprocessor vim \
--save-score --save-csv
| 1,167 | 30.567568 | 96 | sh |
null | OpenOOD-main/scripts/ood/vim/imagenet200_test_ood_vim.sh | #!/bin/bash
# sh scripts/ood/vim/imagenet200_test_ood_vim.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
--postprocessor vim \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
--postprocessor vim \
--save-score --save-csv --fsood
| 708 | 28.541667 | 74 | sh |
null | OpenOOD-main/scripts/ood/vim/imagenet_test_ood_vim.sh | #!/bin/bash
# sh scripts/ood/vim/imagenet_test_ood_vim.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/datasets/imagenet/imagenet_ood.yml \
configs/networks/resnet50.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/vim.yml \
--num_workers 4 \
--ood_dataset.image_size 256 \
--dataset.test.batch_size 256 \
--dataset.val.batch_size 256 \
--network.pretrained True \
--network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \
--postprocessor.postprocessor_args.dim 1000 \
--merge_option merge
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50, swin-t, vit-b-16
# ood
python scripts/eval_ood_imagenet.py \
--tvs-pretrained \
--arch resnet50 \
--postprocessor vim \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--tvs-pretrained \
--arch resnet50 \
--postprocessor vim \
--save-score --save-csv --fsood
| 1,428 | 28.163265 | 82 | sh |
null | OpenOOD-main/scripts/ood/vim/mnist_test_osr_vim.sh | #!/bin/bash
# sh scripts/ood/vim/mnist_test_osr_vim.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p mediasuper -x SZ-IDC1-10-112-2-17 --gres=gpu:${GPU} \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/vim.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \
--mark 0 \
--postprocessor.postprocessor_args.dim 42
| 702 | 27.12 | 66 | sh |
null | OpenOOD-main/scripts/ood/vim/sweep_osr.py | # python scripts/ood/vim/sweep_osr.py
import os
config = [
[
'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'
],
[
'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt'
],
[
'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml',
'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'
],
[
'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml',
'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'
],
]
for [dataset, ood_dataset, network, pth] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/datasets/{ood_dataset} \
configs/networks/{network}.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/vim.yml \
--network.pretrained True \
--network.checkpoint {pth} \
--postprocessor.postprocessor_args.dim 128 \
--num_workers 8 \
--merge_option merge &")
os.system(command)
| 1,371 | 32.463415 | 77 | py |
null | OpenOOD-main/scripts/ood/vos/cifar100_test_vos.sh | #!/bin/bash
# sh scripts/ood/vos/cifar100_test_vos.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ebo.yml \
--num_workers 8 \
--network.pretrained True \
--network.checkpoint 'results/cifar100_resnet18_32x32_vos_e100_lr0.1_default/s0/best.ckpt' \
--mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_vos_e100_lr0.1_default \
--postprocessor ebo \
--save-score --save-csv
| 1,116 | 35.032258 | 96 | sh |
null | OpenOOD-main/scripts/ood/vos/cifar100_train_vos.sh | #!/bin/bash
# sh scripts/ood/vos/cifar100_train_vos.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_vos.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ebo.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge \
--seed 0
| 597 | 32.222222 | 72 | sh |
null | OpenOOD-main/scripts/ood/vos/cifar10_test_vos.sh | #!/bin/bash
# sh scripts/ood/vos/cifar10_test_vos.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ebo.yml \
--num_workers 8 \
--network.pretrained True \
--network.checkpoint 'results/cifar10_resnet18_32x32_vos_e100_lr0.1_default/s0/best.ckpt' \
--mark vos
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_vos_e100_lr0.1_default \
--postprocessor ebo \
--save-score --save-csv
| 1,110 | 34.83871 | 95 | sh |
null | OpenOOD-main/scripts/ood/vos/cifar10_train_vos.sh | #!/bin/bash
# sh scripts/ood/vos/cifar10_train_vos.sh
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_vos.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ebo.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--merge_option merge \
--seed 0
| 594 | 32.055556 | 72 | sh |
null | OpenOOD-main/scripts/ood/vos/imagenet200_test_vos.sh | #!/bin/bash
# sh scripts/ood/vos/imagenet200_test_vos.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_vos_e90_lr0.1_default \
--postprocessor ebo \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_vos_e90_lr0.1_default \
--postprocessor ebo \
--save-score --save-csv --fsood
| 694 | 27.958333 | 72 | sh |
null | OpenOOD-main/scripts/ood/vos/imagenet200_train_vos.sh | #!/bin/bash
# sh scripts/ood/vos/imagenet200_train_vos.sh
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/networks/resnet18_224x224.yml \
configs/pipelines/train/train_vos.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ebo.yml \
--optimizer.num_epochs 90 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0
| 456 | 29.466667 | 59 | sh |
null | OpenOOD-main/scripts/ood/vos/imagenet_train_vos.sh | #!/bin/bash
# sh scripts/ood/vos/imagenet_train_vos.sh
# we observed CUDA OOM error on Quadro RTX 6000 24GB GPUs
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/networks/resnet50.yml \
configs/pipelines/train/train_vos.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ebo.yml \
--network.pretrained True \
--network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \
--feature_dim 2048 \
--optimizer.lr 0.001 \
--optimizer.num_epochs 30 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed ${SEED}
| 670 | 32.55 | 82 | sh |
null | OpenOOD-main/scripts/osr/arpl/2_arpl_test.sh | #!/bin/bash
# sh scripts/c_ood/0_mnist_test_ood_msp.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/digits/mnist.yml \
configs/datasets/digits/mnist_ood.yml \
configs/networks/arpl_net.yml \
configs/pipelines/test/test_arpl.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--mark 0
| 541 | 24.809524 | 73 | sh |
null | OpenOOD-main/scripts/osr/arpl/2_arpl_train.sh | #!/bin/bash
# sh scripts/0_basics/cifar10_train.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/digits/mnist.yml \
configs/networks/arpl_net.yml \
configs/pipelines/train/train_arpl.yml \
--optimizer.num_epochs 100 \
--num_workers 8
| 488 | 23.45 | 51 | sh |
null | OpenOOD-main/scripts/osr/arpl/2_arplgan_test.sh | #!/bin/bash
# sh scripts/c_ood/0_mnist_test_ood_msp.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/digits/mnist.yml \
configs/datasets/digits/mnist_ood.yml \
configs/networks/arpl_gan.yml \
configs/pipelines/test/test_arplgan.yml \
configs/postprocessors/msp.yml \
--dataset.image_size 32 \
--num_workers 8 \
--mark 0
| 570 | 24.954545 | 73 | sh |
null | OpenOOD-main/scripts/osr/arpl/2_arplgan_train.sh | #!/bin/bash
# sh scripts/0_basics/cifar10_train.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/digits/mnist.yml \
configs/networks/arpl_gan.yml \
configs/pipelines/train/train_arpl_gan.yml \
--dataset.image_size 32 \
--optimizer.num_epochs 100 \
--num_workers 8
| 519 | 22.636364 | 51 | sh |
null | OpenOOD-main/scripts/osr/arpl/cifar100_test_ood_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/cifar100_test_ood_arpl.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
# this method needs to load multiple networks, please set the checkpoints in test_pipeling config file
# need to manually change the checkpoint path in configs/pipelines/test/test_arpl.yml
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/arpl_net.yml \
configs/pipelines/test/test_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--network.feat_extract_network.name resnet18_32x32 \
--num_workers 8 \
--seed 0
| 881 | 32.923077 | 102 | sh |
null | OpenOOD-main/scripts/osr/arpl/cifar100_train_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/cifar100_train_arpl.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/arpl_net.yml \
configs/pipelines/train/train_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
--network.feat_extract_network.name resnet18_32x32 \
--num_workers 8 \
--optimizer.num_epochs 100 \
--seed 0
| 641 | 26.913043 | 56 | sh |
null | OpenOOD-main/scripts/osr/arpl/cifar10_test_ood_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/cifar10_test_ood_arpl.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
# this method needs to load multiple networks, please set the checkpoints in test_pipeling config file
# need to manually change the checkpoint path in configs/pipelines/test/test_arpl.yml
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/arpl_net.yml \
configs/pipelines/test/test_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--network.feat_extract_network.name resnet18_32x32 \
--num_workers 8 \
--seed 0
| 876 | 32.730769 | 102 | sh |
null | OpenOOD-main/scripts/osr/arpl/cifar10_train_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/cifar10_train_arpl.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/arpl_net.yml \
configs/pipelines/train/train_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
--network.feat_extract_network.name resnet18_32x32 \
--num_workers 8 \
--optimizer.num_epochs 100 \
--seed 0
| 638 | 26.782609 | 56 | sh |
null | OpenOOD-main/scripts/osr/arpl/imagenet200_test_ood_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/imagenet200_test_ood_arpl.sh
# NOTE!!!!
# need to manually change the checkpoint path in configs/pipelines/test/test_arpl.yml
SCHEME="ood" # "ood" or "fsood"
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/datasets/imagenet200/imagenet200_${SCHEME}.yml \
configs/networks/arpl_net.yml \
configs/pipelines/test/test_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--network.feat_extract_network.name resnet18_224x224 \
--num_workers 8 \
--evaluator.ood_scheme ${SCHEME} \
--seed 0
| 630 | 34.055556 | 85 | sh |
null | OpenOOD-main/scripts/osr/arpl/imagenet200_train_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/imagenet200_train_arpl.sh
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/networks/arpl_net.yml \
configs/pipelines/train/train_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
--network.feat_extract_network.name resnet18_224x224 \
--optimizer.num_epochs 90 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0
| 473 | 30.6 | 59 | sh |
null | OpenOOD-main/scripts/osr/arpl/imagenet_test_ood_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/imagenet_test_ood_arpl.sh
# NOTE!!!!
# need to manually change the checkpoint path in configs/pipelines/test/test_arpl.yml
SCHEME="fsood" # "ood" or "fsood"
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/datasets/imagenet/imagenet_${SCHEME}.yml \
configs/networks/arpl_net.yml \
configs/pipelines/test/test_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--network.feat_extract_network.name resnet50 \
--num_workers 8 \
--evaluator.ood_scheme ${SCHEME} \
--seed 0
| 609 | 32.888889 | 85 | sh |
null | OpenOOD-main/scripts/osr/arpl/imagenet_train_arpl.sh | #!/bin/bash
# sh scripts/osr/arpl/imagenet200_train_arpl.sh
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/networks/arpl_net.yml \
configs/pipelines/train/train_arpl.yml \
configs/preprocessors/base_preprocessor.yml \
--network.feat_extract_network.name resnet50 \
--network.feat_extract_network.pretrained True \
--network.feat_extract_network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \
--optimizer.lr 0.001 \
--optimizer.num_epochs 30 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0
| 643 | 34.777778 | 103 | sh |
null | OpenOOD-main/scripts/osr/opengan/cifar100_test_ood_opengan.sh | #!/bin/bash
# sh scripts/osr/opengan/cifar100_test_ood_opengan.sh
# NOTE!!!!
# need to manually change the network checkpoint path (not backbone) in configs/pipelines/test/test_opengan.yml
# corresponding to different runs
SEED=0
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/opengan.yml \
configs/pipelines/test/test_opengan.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/opengan.yml \
--num_workers 8 \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \
--seed ${SEED}
| 718 | 36.842105 | 112 | sh |
null | OpenOOD-main/scripts/osr/opengan/cifar100_train_opengan.sh | #!/bin/bash
# sh scripts/osr/opengan/cifar100_train_opengan.sh
SEED=0
# feature extraction
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_opengan_feat_extract.yml \
configs/preprocessors/base_preprocessor.yml \
--network.checkpoint "./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt" \
--seed ${SEED}
# train
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/opengan.yml \
configs/pipelines/train/train_opengan.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/opengan.yml \
--dataset.feat_root ./results/cifar100_resnet18_32x32_feat_extract_opengan_default/s${SEED} \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \
--seed ${SEED}
| 1,016 | 36.666667 | 112 | sh |
null | OpenOOD-main/scripts/osr/opengan/cifar10_test_ood_opengan.sh | #!/bin/bash
# sh scripts/osr/opengan/cifar10_test_ood_opengan.sh
# NOTE!!!!
# need to manually change the network checkpoint path (not backbone) in configs/pipelines/test/test_opengan.yml
# corresponding to different runs
SEED=0
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/opengan.yml \
configs/pipelines/test/test_opengan.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/opengan.yml \
--num_workers 8 \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \
--seed ${SEED}
| 712 | 36.526316 | 111 | sh |
null | OpenOOD-main/scripts/osr/opengan/cifar10_train_opengan.sh | #!/bin/bash
# sh scripts/osr/opengan/cifar10_train_opengan.sh
SEED=0
# feature extraction
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_opengan_feat_extract.yml \
configs/preprocessors/base_preprocessor.yml \
--network.checkpoint "./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt" \
--seed ${SEED}
# train
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/opengan.yml \
configs/pipelines/train/train_opengan.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/opengan.yml \
--dataset.feat_root ./results/cifar10_resnet18_32x32_feat_extract_opengan_default/s${SEED} \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s${SEED}/best.ckpt \
--seed ${SEED}
| 1,006 | 36.296296 | 111 | sh |
null | OpenOOD-main/scripts/osr/opengan/feature_extract.sh | #!/bin/bash
# sh scripts/osr/opengan/feature_extract.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/feat_extract.yml \
configs/preprocessors/base_preprocessor.yml \
--network.checkpoint "results/cifar100_resnet18_32x32_base_e100_lr0.1/best.ckpt" \
--pipeline.extract_target train \
--merge_option merge \
--num_workers 8 \
--mark 0
| 664 | 27.913043 | 82 | sh |
null | OpenOOD-main/scripts/osr/opengan/imagenet200_test_ood_opengan.sh | #!/bin/bash
# sh scripts/osr/opengan/imagenet200_test_ood_opengan.sh
# NOTE!!!!
# need to manually change the network checkpoint path (not backbone) in configs/pipelines/test/test_opengan.yml
# corresponding to different runs
SEED=0
SCHEME="ood" # "ood" or "fsood"
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/datasets/imagenet200/imagenet200_${SCHEME}.yml \
configs/networks/opengan.yml \
configs/pipelines/test/test_opengan.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/opengan.yml \
--num_workers 8 \
--network.backbone.name resnet18_224x224 \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \
--evaluator.ood_scheme ${SCHEME} \
--seed ${SEED}
| 861 | 38.181818 | 116 | sh |
null | OpenOOD-main/scripts/osr/opengan/imagenet200_train_opengan.sh | #!/bin/bash
# sh scripts/osr/opengan/imagenet200_train_opengan.sh
SEED=0
# feature extraction
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/datasets/imagenet200/imagenet200_ood.yml \
configs/networks/resnet18_224x224.yml \
configs/pipelines/train/train_opengan_feat_extract.yml \
configs/preprocessors/base_preprocessor.yml \
--network.checkpoint "./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt" \
--seed ${SEED}
# train
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/networks/opengan.yml \
configs/pipelines/train/train_opengan.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/opengan.yml \
--dataset.feat_root ./results/imagenet200_resnet18_224x224_feat_extract_opengan_default/s${SEED} \
--network.backbone.pretrained True \
--network.backbone.checkpoint ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default/s${SEED}/best.ckpt \
--optimizer.num_epochs 90 \
--seed ${SEED}
| 1,084 | 37.75 | 116 | sh |
null | OpenOOD-main/scripts/osr/openmax/cifar100_test_ood_openmax.sh | #!/bin/bash
# sh scripts/osr/openmax/cifar100_test_ood_openmax.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/openmax.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
--mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \
--postprocessor openmax \
--save-score --save-csv
| 979 | 28.69697 | 97 | sh |
null | OpenOOD-main/scripts/osr/openmax/cifar10_test_ood_openmax.sh | #!/bin/bash
# sh scripts/osr/openmax/cifar10_test_ood_openmax.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/openmax.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
--mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \
--postprocessor openmax \
--save-score --save-csv
| 971 | 28.454545 | 96 | sh |
null | OpenOOD-main/scripts/osr/openmax/imagenet200_test_ood_openmax.sh | #!/bin/bash
# sh scripts/ood/openmax/imagenet200_test_ood_openmax.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
--postprocessor openmax \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
--postprocessor openmax \
--save-score --save-csv --fsood
| 724 | 29.208333 | 74 | sh |
null | OpenOOD-main/scripts/osr/openmax/imagenet_test_ood_openmax.sh | #!/bin/bash
# sh scripts/osr/openmax/imagenet_test_ood_openmax.sh
GPU=1
CPU=1
node=63
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/datasets/imagenet/imagenet_ood.yml \
configs/networks/resnet50.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/openmax.yml \
--num_workers 10 \
--ood_dataset.image_size 256 \
--dataset.test.batch_size 256 \
--dataset.val.batch_size 256 \
--network.pretrained True \
--network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \
--merge_option merge
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50, swin-t, vit-b-16
# ood
python scripts/eval_ood_imagenet.py \
--tvs-pretrained \
--arch resnet50 \
--postprocessor openmax \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--tvs-pretrained \
--arch resnet50 \
--postprocessor openmax \
--save-score --save-csv --fsood
| 1,399 | 28.166667 | 82 | sh |
null | OpenOOD-main/scripts/osr/openmax/mnist_test_ood_openmax.sh | #!/bin/bash
# sh scripts/osr/openmax/mnist_test_ood_openmax.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/openmax.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \
--mark 0
| 487 | 22.238095 | 70 | sh |
null | OpenOOD-main/scripts/osr/openmax/mnist_test_osr_openmax.sh | #!/bin/bash
# sh scripts/osr/openmax/mnist_test_osr_openmax.sh
# GPU=1
# CPU=1
# node=30
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/openmax.yml \
--num_workers 8 \
--network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \
--mark 0
| 683 | 27.5 | 73 | sh |
null | OpenOOD-main/scripts/osr/openmax/sweep_osr.py | # python scripts/osr/openmax/sweep_osr.py
import os
config = [
[
'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'
],
[
'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt'
],
[
'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml',
'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'
],
[
'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml',
'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'
],
]
for [dataset, ood_dataset, network, pth] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/datasets/{ood_dataset} \
configs/networks/{network}.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/openmax.yml \
--network.pretrained True \
--network.checkpoint {pth} \
--num_workers 8 \
--merge_option merge &")
os.system(command)
| 1,330 | 32.275 | 77 | py |
null | OpenOOD-main/scripts/sweep/sweep_hyperparam.py | import argparse
import os
# dictionary with keywords from benchmarks
network_dict = {
'mnist': 'lenet',
'mnist6': 'lenet',
'cifar10': 'resnet18_32x32',
'cifar6': 'resnet18_32x32',
'cifar100': 'resnet18_32x32',
'cifar50': 'resnet18_32x32',
'imagenet': 'resnet50',
'tin20': 'resnet18_64x64'
}
checkpoint_dict = {
'mnist': './results/checkpoints/mnist_lenet_acc98.50.ckpt',
'cifar10': './results/checkpoints/cifar10_res18_acc95.24.ckpt',
'cifar100': './results/checkpoints/cifar100_res18_acc77.10.ckpt',
'imagenet': './results/checkpoints/imagenet_res50_acc76.17.pth',
'mnist6': './results/checkpoints/osr/mnist6',
'cifar6': './results/checkpoints/osr/cifar6',
'cifar50': './results/checkpoints/osr/cifar50',
'tin20': './results/checkpoints/osr/tin20',
}
method_dict = {
'msp':
None,
'odin': [
'--postprocessor.postprocessor_args.temperature 1',
'--postprocessor.postprocessor_args.temperature 100',
'--postprocessor.postprocessor_args.temperature 1000'
],
'mds':
None,
'gram':
None,
}
def make_args_list(benchmarks, methods, metrics):
args_list = []
for benchmark in benchmarks:
for method in methods:
for metric in metrics:
args_list.append([benchmark, method, metric])
return args_list
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run a sweep')
parser.add_argument('--benchmarks',
nargs='+',
default=['mnist', 'cifar10', 'cifar100', 'imagenet'])
parser.add_argument('--methods', nargs='+', default=['msp'])
parser.add_argument('--metrics', nargs='+', default=['acc'])
parser.add_argument('--output-dir', type=str, default='./results/')
parser.add_argument('--launcher',
default='local',
choices=['local', 'slurm'])
args = parser.parse_args()
# different command with different job schedulers
if args.launcher == 'slurm':
command_prefix = ("PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 -w SG-IDC1-10-51-2-79 ")
else:
command_prefix = "PYTHONPATH='.':$PYTHONPATH "
args_list = make_args_list(args.benchmarks, args.methods, args.metrics)
print(f'{len(args_list)} experiments have been setup...', flush=True)
for exp_id, [benchmark, method, metric] in enumerate(args_list):
print(f'Experiment #{exp_id} Starts...', flush=True)
print(f'Config: {benchmark}, {method}, {metric}', flush=True)
if metric in ['ood', 'fsood']:
command = (f'python main.py --config \
configs/datasets/{benchmark}/{benchmark}.yml \
configs/datasets/{benchmark}/{benchmark}_{metric}.yml \
configs/preprocessors/base_preprocessor.yml \
configs/networks/{network_dict[benchmark]}.yml \
configs/pipelines/test/test_{metric}.yml \
configs/postprocessors/{method}.yml \
--network.checkpoint {checkpoint_dict[benchmark]} \
--output_dir {args.output_dir}')
elif metric == 'osr':
for sid in range(1, 6):
print(f'5 OSR Exp, {sid} out of 5', flush=True)
command = (f'python main.py --config \
configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}.yml \
configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/networks/{network_dict[benchmark]}.yml \
configs/pipelines/test/test_osr.yml \
configs/postprocessors/{method}.yml \
--network.checkpoint {checkpoint_dict[benchmark]}_seed{sid}.ckpt \
--output_dir {args.output_dir}')
os.system(command_prefix + command)
elif metric in ['acc', 'ece']:
command = (f'python main.py --config \
configs/datasets/{benchmark}/{benchmark}.yml \
configs/preprocessors/base_preprocessor.yml \
configs/networks/{network_dict[benchmark]}.yml \
configs/pipelines/test/test_{metric}.yml \
configs/postprocessors/{method}.yml \
--network.checkpoint {checkpoint_dict[benchmark]} \
--output_dir {args.output_dir}')
os.system(command_prefix + command)
else:
raise ValueError('Unexpected Metric...')
| 4,645 | 40.115044 | 82 | py |
null | OpenOOD-main/scripts/sweep/sweep_posthoc.py | import argparse
import csv
import os
import numpy as np
from write_metrics import make_args_list, write_metric, write_total
# dictionary with keywords from benchmarks
network_dict = {
'mnist': 'lenet',
'mnist6': 'lenet',
'cifar10': 'resnet18_32x32',
'cifar6': 'resnet18_32x32',
'cifar100': 'resnet18_32x32',
'cifar50': 'resnet18_32x32',
'imagenet': 'resnet50',
'tin20': 'resnet18_64x64'
}
checkpoint_dict = {
'mnist': './results/checkpoints/mnist_lenet_acc98.50.ckpt',
'cifar10': './results/checkpoints/cifar10_res18_acc95.24.ckpt',
'cifar100': './results/checkpoints/cifar100_res18_acc77.10.ckpt',
'imagenet': './results/checkpoints/imagenet_res50_acc76.17.pth',
'mnist6': './results/checkpoints/osr/mnist6',
'cifar6': './results/checkpoints/osr/cifar6',
'cifar50': './results/checkpoints/osr/cifar50',
'tin20': './results/checkpoints/osr/tin20',
}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run a sweep')
parser.add_argument('--benchmarks',
nargs='+',
default=['mnist', 'cifar10', 'cifar100', 'imagenet'])
parser.add_argument('--methods', nargs='+', default=['msp'])
parser.add_argument('--metrics', nargs='+', default=['acc'])
parser.add_argument('--metric2save', nargs='+', default=['auroc'])
parser.add_argument('--update_form_only', action='store_true')
parser.add_argument('--output-dir', type=str, default='./results/')
parser.add_argument('--launcher',
default='local',
choices=['local', 'slurm'])
parser.add_argument('--merge-option', default='default')
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# different command with different job schedulers
if args.launcher == 'slurm':
command_prefix = ("PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 -w SG-IDC1-10-51-2-79 ")
else:
command_prefix = "PYTHONPATH='.':$PYTHONPATH "
# TODO: dynamic benchmark dict
benchmark_dict = {
'ood': ['cifar10', 'cifar100'],
'osr': ['cifar6', 'cifar50', 'mnist6', 'tin20'],
'acc': args.benchmarks
}
args_list = make_args_list(args.benchmarks, args.methods, args.metrics,
benchmark_dict)
print(f'{len(args_list)} experiments have been setup...', flush=True)
if not args.update_form_only:
for exp_id, [benchmark, method, metric] in enumerate(args_list):
print(f'Experiment #{exp_id+1} Starts...', flush=True)
print(f'Config: {benchmark}, {method}, {metric}', flush=True)
if metric in ['ood', 'fsood']:
command = (f'python main.py --config \
configs/datasets/{benchmark}/{benchmark}.yml \
configs/datasets/{benchmark}/{benchmark}_{metric}.yml \
configs/preprocessors/base_preprocessor.yml \
configs/networks/{network_dict[benchmark]}.yml \
configs/pipelines/test/test_{metric}.yml \
configs/postprocessors/{method}.yml \
--network.checkpoint {checkpoint_dict[benchmark]} \
--merge_option {args.merge_option} \
--output_dir {args.output_dir}')
os.system(command_prefix + command)
elif metric == 'osr':
for sid in range(1, 6):
print(f'5 OSR Exp, {sid} out of 5', flush=True)
command = (f'python main.py --config \
configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}.yml \
configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/networks/{network_dict[benchmark]}.yml \
configs/pipelines/test/test_osr.yml \
configs/postprocessors/{method}.yml \
--network.checkpoint {checkpoint_dict[benchmark]}_seed{sid}.ckpt \
--output_dir {args.output_dir} \
--merge_option {args.merge_option}')
os.system(command_prefix + command)
elif metric in ['acc', 'ece']:
command = (f'python main.py --config \
configs/datasets/{benchmark}/{benchmark}.yml \
configs/preprocessors/base_preprocessor.yml \
configs/networks/{network_dict[benchmark]}.yml \
configs/pipelines/test/test_{metric}.yml \
configs/postprocessors/{method}.yml \
--network.checkpoint {checkpoint_dict[benchmark]} \
--output_dir {args.output_dir} \
--merge_option {args.merge_option}')
os.system(command_prefix + command)
else:
raise ValueError('Unexpected Metric...')
folder_list = os.listdir(args.output_dir)
# TODO: do not hard code -8
save_line_dict = {'ood': -8, 'osr': -1, 'acc': -1}
# TODO: extend according to config
args.benchmarks.extend([
'tin', 'nearood', 'mnist', 'svhn', 'texture', 'place365', 'places365',
'farood'
])
# TODO: try to find farood and near ood in another way, user can se t what to save by changing ood's list
main_content_extract_dict = {'ood': ['nearood', 'farood'], 'osr': [-1]}
write_metric(args, folder_list, save_line_dict, benchmark_dict)
write_total(args, folder_list, save_line_dict, benchmark_dict,
main_content_extract_dict)
| 5,797 | 44.296875 | 109 | py |
null | OpenOOD-main/scripts/sweep/sweep_posthoc.sh | # sh ./scripts/sweep/sweep_posthoc.sh
python ./scripts/sweep/sweep_posthoc.py \
--benchmarks 'cifar10' \
--methods 'msp' \
--metrics 'ood' \
--metric2save 'fpr95' 'auroc' 'aupr_in' \
--output-dir './results/ood' \
--launcher 'local' \
--update_form_only
| 254 | 24.5 | 41 | sh |
null | OpenOOD-main/scripts/sweep/sweep_posthoc_ood.sh | # sh ./scripts/sweep/sweep_posthoc-backup.sh
python ./scripts/sweep/sweep_posthoc.py \
--benchmarks 'cifar10' 'cifar100' \
--methods 'msp' 'odin' 'mds' 'gram' 'ebo' 'gradnorm' 'react' 'dice' 'vim' 'mls' 'klm' 'knn' \
--metrics 'ood' \
--metric2save 'fpr95' 'auroc' 'aupr_in' \
--output-dir './results/ood' \
--launcher 'local' \
--update_form_only
| 348 | 33.9 | 93 | sh |
null | OpenOOD-main/scripts/sweep/sweep_posthoc_osr.sh | # sh ./scripts/sweep/sweep_posthoc_osr.sh
python ./scripts/sweep/sweep_posthoc.py \
--benchmarks 'cifar6' 'cifar50' 'mnist6' 'tin20' \
--methods 'msp' \
--metrics 'osr' \
--metric2save 'fpr95' 'auroc' 'aupr_in' \
--output-dir './results/osr' \
--launcher 'local' \
--update_form_only
| 284 | 27.5 | 50 | sh |
null | OpenOOD-main/scripts/sweep/sweep_posthoc_total.sh | # sh ./scripts/sweep/sweep_posthoc_total.sh
python ./scripts/sweep/sweep_posthoc.py \
--benchmarks 'cifar6' 'cifar50' 'mnist6' 'tin20' 'cifar10' 'cifar100' \
--methods 'msp' 'odin' 'mds' 'gram' 'ebo' 'gradnorm' 'react' 'dice' 'vim' 'mls' 'klm' 'knn' \
--metrics 'osr' 'ood' \
--metric2save 'fpr95' 'auroc' 'aupr_in' \
--output-dir './results/total' \
--launcher 'local' \
--merge-option 'pass' \
--update_form_only
| 416 | 36.909091 | 93 | sh |
null | OpenOOD-main/scripts/sweep/sweep_train.py | import argparse
import os
# Maps each benchmark keyword to the backbone network name used for it.
# The value is resolved to a config file at configs/networks/<value>.yml
# by the sweep loop below.
network_dict = {
    'mnist': 'lenet',
    'mnist6': 'lenet',
    'cifar10': 'resnet18_32x32',
    'cifar6': 'resnet18_32x32',
    'cifar100': 'resnet18_32x32',
    'cifar50': 'resnet18_32x32',
    'imagenet': 'resnet50',
    'tin20': 'resnet18_64x64'
}
def make_args_list(benchmarks, methods, metrics):
    """Return the cartesian product of the three argument lists.

    Each element is a 3-item list ``[benchmark, method, metric]``.
    Ordering matches nested iteration with ``benchmarks`` outermost and
    ``metrics`` innermost, so callers see a deterministic sequence.
    """
    return [
        [benchmark, method, metric]
        for benchmark in benchmarks
        for method in methods
        for metric in metrics
    ]
if __name__ == '__main__':
    # CLI: choose which benchmarks to sweep and which job scheduler to use.
    parser = argparse.ArgumentParser(description='Run a sweep')
    parser.add_argument('--benchmarks',
                        nargs='+',
                        default=['mnist', 'cifar10', 'cifar100', 'imagenet'])
    parser.add_argument('--launcher',
                        default='local',
                        choices=['local', 'slurm'])
    args = parser.parse_args()
    # different command with different job schedulers
    # NOTE: the backslash-continued string literal embeds long runs of
    # spaces in the command; the shell collapses them when it word-splits.
    if args.launcher == 'slurm':
        command_prefix = ("PYTHONPATH='.':$PYTHONPATH \
            srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
            --cpus-per-task=1 --ntasks-per-node=1 \
            --kill-on-bad-exit=1 -w SG-IDC1-10-51-2-67 ")
    else:
        command_prefix = "PYTHONPATH='.':$PYTHONPATH "
    print(f'{len(args.benchmarks)} experiments have been setup...', flush=True)
    # Launch 5 OSR training runs (seeds 1-5) per benchmark. The trailing
    # ' &' backgrounds every job, so all runs start concurrently and this
    # script exits without waiting for them — TODO confirm that is intended.
    for exp_id, benchmark in enumerate(args.benchmarks):
        print(f'Experiment #{exp_id} Starts...', flush=True)
        for sid in range(1, 6):
            print(f'5 OSR Exp, {sid} out of 5', flush=True)
            command = (f'python main.py --config \
                configs/datasets/osr_{benchmark}/{benchmark}_seed{sid}.yml \
                configs/preprocessors/base_preprocessor.yml \
                configs/networks/{network_dict[benchmark]}.yml \
                configs/pipelines/train/baseline.yml')
            os.system(command_prefix + command + ' &')
| 2,041 | 35.464286 | 79 | py |
null | OpenOOD-main/scripts/sweep/sweep_train.sh | # sh ./scripts/sweep/sweep_train.sh
# Launch the OSR training sweep (5 seeds per benchmark) for tin20 on slurm.
python ./scripts/sweep/sweep_train.py \
--benchmarks 'tin20' \
--launcher 'slurm'
| 118 | 22.8 | 39 | sh |
null | OpenOOD-main/scripts/sweep/write_metrics.py | import csv
import os
import numpy as np
def make_args_list(benchmarks, methods, metrics, benchmark_dict):
    """Enumerate [benchmark, method, metric] triples for a sweep.

    For each metric, only benchmarks that appear both in ``benchmarks``
    and in ``benchmark_dict[metric]`` are used (set intersection, so the
    benchmark ordering within a metric is unspecified).
    """
    return [
        [benchmark, method, metric]
        for metric in metrics
        for benchmark in set(benchmarks) & set(benchmark_dict[metric])
        for method in methods
    ]
def write_metric(args, folder_list, save_line_dict, benchmark_dict):
    """Collect per-experiment ood.csv results and write summary CSVs.

    For 'ood' metrics: one '<benchmark>_ood.csv' per benchmark, one row per
    method. For 'osr' metrics: a single 'total_osr.csv' averaging results
    over five fixed seeds. Experiment folders are matched by checking that
    every key in [benchmark, method, metric] appears among the '_'-split
    tokens of the folder name; a miss raises FileNotFoundError.
    """
    # Column order of the source ood.csv files; +1 below because column 0
    # holds the row label (dataset/method name).
    metric_list = [
        'fpr95', 'auroc', 'aupr_in', 'aupr_out', 'ccr_4', 'ccr_3', 'ccr_2',
        'ccr_1', 'acc'
    ]
    save_list = []
    for metric in args.metric2save:
        save_list.append(metric_list.index(metric) + 1)
    for metric in args.metrics:
        if metric == 'ood':
            for benchmark in set(args.benchmarks) & set(
                    benchmark_dict[metric]):
                args_list = make_args_list([benchmark], args.methods, ['ood'],
                                           benchmark_dict)
                sub_form_content = []
                for key_param in args_list:
                    # for/else: the else fires only when no folder matched.
                    for folder in folder_list:
                        key_folder = folder.split('_')
                        if all(key in key_folder for key in key_param):
                            target_folder = folder
                            break
                    else:
                        print("No respective folder path, something's wrong.")
                        raise FileNotFoundError
                        # quit()
                    # Skip the header region; save_line_dict gives the first
                    # data line for this metric kind.
                    with open(
                            os.path.join(args.output_dir, target_folder,
                                         'ood.csv'), 'r') as f:
                        lines = f.readlines()[save_line_dict[key_param[-1]]:]
                    sub_line_content = {}
                    sub_line_content['method/{}'.format(
                        args.metric2save)] = key_param[1]
                    for line in lines:
                        split = line.split(',')
                        content = ''
                        # Join the selected metrics as 'a / b / c'; the
                        # for-else just trims the trailing ' / '.
                        for metric in save_list:
                            content = content + '{:.2f}'.format(
                                float(split[metric])) + ' / '
                        else:
                            content = content[:-3]
                        # use method name as key
                        sub_line_content[split[0]] = content
                    sub_form_content.append(sub_line_content)
                csv_path = os.path.join(args.output_dir,
                                        '{}_ood.csv'.format(key_param[0]))
                with open(csv_path, 'w', newline='') as csvfile:
                    fieldnames = order_fieldnames(
                        list(sub_form_content[0].keys()), args)
                    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                    writer.writeheader()
                    for sub_line_content in sub_form_content:
                        writer.writerow(sub_line_content)
        elif metric == 'osr':
            sub_form_content = []
            for method in args.methods:
                args_list = make_args_list(args.benchmarks, [method], ['osr'],
                                           benchmark_dict)
                sub_line_content = {}
                for key_param in args_list:
                    sub_line_content['method/{}'.format(
                        args.metric2save)] = key_param[1]
                    target_folder = []
                    # One run per fixed seed; seed token is appended to the
                    # match keys and popped again after the lookup.
                    seeds = ['seed1', 'seed2', 'seed3', 'seed4', 'seed5']
                    for seed in seeds:
                        key_param.append(seed)
                        for folder in folder_list:
                            key_folder = folder.split('_')
                            if all(key in key_folder for key in key_param):
                                target_folder.append(folder)
                                break
                        else:
                            print(
                                "No respective folder path, something's wrong."
                            )
                            raise FileNotFoundError
                            # quit()
                        key_param.pop(-1)
                    # temp[i][j]: metric j of seed i; averaged over seeds.
                    # NOTE(review): np.ndarray() is uninitialised memory — any
                    # cell not overwritten below would hold garbage.
                    temp = np.ndarray(shape=(len(seeds), len(save_list)))
                    for i, folder in enumerate(target_folder):
                        with open(
                                os.path.join(args.output_dir, folder,
                                             'ood.csv'), 'r') as f:
                            lines = f.readlines(
                            )[save_line_dict[key_param[-1]]:]
                        for line in lines:
                            split = line.split(',')
                            for j, metric_index in enumerate(save_list):
                                temp[i][j] = split[metric_index]
                    content = ''
                    for item in np.mean(temp, axis=0):
                        content = content + '{:.2f}'.format(item) + ' / '
                    else:
                        content = content[:-3]
                    sub_line_content[key_param[0]] = content
                sub_form_content.append(sub_line_content)
            csv_path = os.path.join(args.output_dir, 'total_osr.csv')
            with open(csv_path, 'w', newline='') as csvfile:
                fieldnames = order_fieldnames(list(sub_form_content[0].keys()),
                                              args)
                writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
                writer.writeheader()
                for sub_line_content in sub_form_content:
                    writer.writerow(sub_line_content)
def write_total(args, folder_list, save_line_dict, benchmark_dict,
                main_content_extract_dict):
    """Write 'total_result.csv': one row per method, one column per benchmark.

    For 'ood' it extracts only AUROC (csv column 2) of the dataset groups
    listed in main_content_extract_dict; for 'osr' it averages AUROC over
    the five fixed seeds. Unlike write_metric, a missing folder here only
    prints a warning (and then uses the stale target_folder — NOTE(review):
    that would reuse the previous match or raise NameError on the first
    miss; consider raising instead).
    """
    main_form_content = []
    for method in args.methods:
        main_line_content = {}
        for metric in args.metrics:
            args_list = make_args_list(args.benchmarks, [method], [metric],
                                       benchmark_dict)
            for key_param in args_list:
                main_line_content['method --> auroc'] = key_param[1]
                if metric == 'ood':
                    for folder in folder_list:
                        key_folder = folder.split('_')
                        if all(key in key_folder for key in key_param):
                            target_folder = folder
                            break
                    else:
                        print("No respective folder path, something's wrong.")
                        # quit()
                    with open(
                            os.path.join(args.output_dir, target_folder,
                                         'ood.csv'), 'r') as f:
                        lines = f.readlines()[save_line_dict[key_param[-1]]:]
                    content = ''
                    # Keep only rows whose label is in the extract list for
                    # this metric kind; the for-else trims the final ' / '.
                    for line in lines:
                        if line.split(',')[0] in main_content_extract_dict[
                                key_param[-1]]:
                            # take auroc only
                            content = content + '{:.2f}'.format(
                                float(line.split(',')[2])) + ' / '
                    else:
                        content = content[:-3]
                    # use benchmark name as key
                    main_line_content[key_param[0]] = content
                if metric == 'osr':
                    target_folder = []
                    seeds = ['seed1', 'seed2', 'seed3', 'seed4', 'seed5']
                    for seed in seeds:
                        key_param.append(seed)
                        for folder in folder_list:
                            key_folder = folder.split('_')
                            if all(key in key_folder for key in key_param):
                                target_folder.append(folder)
                                break
                        else:
                            print(
                                "No respective folder path, something's wrong."
                            )
                            # quit()
                        key_param.pop(-1)
                    # One AUROC per seed, then mean over seeds.
                    temp = np.ndarray(shape=(len(seeds), 1))
                    for i, folder in enumerate(target_folder):
                        with open(
                                os.path.join(args.output_dir, folder,
                                             'ood.csv'), 'r') as f:
                            lines = f.readlines(
                            )[save_line_dict[key_param[-1]]:]
                        for line in lines:
                            split = line.split(',')
                            temp[i] = split[2]
                    content = '{:.2f}'.format(np.mean(temp, axis=0).item())
                    main_line_content[key_param[0]] = content
        main_form_content.append(main_line_content)
    csv_path = os.path.join(args.output_dir, 'total_result.csv')
    with open(csv_path, 'w', newline='') as csvfile:
        fieldnames = order_fieldnames(list(main_form_content[0].keys()), args)
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for main_line_content in main_form_content:
            writer.writerow(main_line_content)
# NOTE(review): module-level side effect — this scans ./results/total at
# import time and prints every experiment folder missing an ood.csv. Looks
# like leftover debugging; it raises FileNotFoundError if the directory does
# not exist. Consider guarding it behind __main__.
verify_dir = './results/total'
for folder in os.listdir(verify_dir):
    if os.path.isdir(os.path.join(verify_dir, folder)):
        if 'ood.csv' not in os.listdir(os.path.join(verify_dir, folder)):
            # if 'seed1' in folder.split('_'):
            print(folder)
def order_fieldnames(keys, args):
    """Order CSV fieldnames: the label column first, then benchmarks.

    Keeps ``keys[0]`` (the 'method/...' label key) in front, followed by
    every entry of ``args.benchmarks`` that occurs in ``keys``, preserving
    the order of ``args.benchmarks``.
    """
    benchmark_cols = [name for name in args.benchmarks if name in keys]
    return [keys[0]] + benchmark_cols
| 9,826 | 42.482301 | 79 | py |
null | OpenOOD-main/scripts/uncertainty/augmix/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/cifar100_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# Evaluate MSP post-hoc scores for the AugMix-trained (no-JSD) CIFAR-100
# model; writes per-sample scores and a CSV summary under --root.
python scripts/eval_ood.py \
   --id-data cifar100 \
   --root ./results/cifar100_resnet18_32x32_augmix_e100_lr0.1_no-jsd \
   --postprocessor msp \
   --save-score --save-csv
| 475 | 30.733333 | 70 | sh |
null | OpenOOD-main/scripts/uncertainty/augmix/cifar100_train_augmix.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/cifar100_train_augmix.sh
# (comment fixed: this is the cifar100 script, not cifar10)
# somehow the loss will diverge to NaN if using JSD
# so just use no-jsd here
python main.py \
    --config configs/datasets/cifar100/cifar100.yml \
    configs/networks/resnet18_32x32.yml \
    configs/pipelines/train/train_augmix.yml \
    configs/preprocessors/augmix_preprocessor.yml \
    --preprocessor.severity 3 \
    --trainer.trainer_args.jsd False \
    --dataset.train.dataset_class ImglistDataset \
    --optimizer.num_epochs 100 \
    --dataset.train.batch_size 128 \
    --seed 0 \
    --mark no-jsd
| 584 | 31.5 | 55 | sh |
null | OpenOOD-main/scripts/uncertainty/augmix/cifar10_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/cifar10_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# Evaluate MSP post-hoc scores for the AugMix-trained (no-JSD) CIFAR-10 model.
python scripts/eval_ood.py \
   --id-data cifar10 \
   --root ./results/cifar10_resnet18_32x32_augmix_e100_lr0.1_no-jsd \
   --postprocessor msp \
   --save-score --save-csv
| 472 | 30.533333 | 69 | sh |
null | OpenOOD-main/scripts/uncertainty/augmix/cifar10_train_augmix.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/cifar10_train_augmix.sh
# somehow the loss will diverge to NaN if using JSD
# so just use no-jsd here
# Train a ResNet18 on CIFAR-10 with AugMix augmentation (severity 3, JSD off).
python main.py \
    --config configs/datasets/cifar10/cifar10.yml \
    configs/networks/resnet18_32x32.yml \
    configs/pipelines/train/train_augmix.yml \
    configs/preprocessors/augmix_preprocessor.yml \
    --preprocessor.severity 3 \
    --trainer.trainer_args.jsd False \
    --dataset.train.dataset_class ImglistDataset \
    --optimizer.num_epochs 100 \
    --dataset.train.batch_size 128 \
    --seed 0 \
    --mark no-jsd
| 582 | 31.388889 | 55 | sh |
null | OpenOOD-main/scripts/uncertainty/augmix/imagenet200_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/imagenet200_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# Two passes over the same checkpoint: standard OOD first, then
# full-spectrum OOD (--fsood).
# ood
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_augmix_e90_lr0.1_default \
   --postprocessor msp \
   --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_augmix_e90_lr0.1_default \
   --postprocessor msp \
   --save-score --save-csv --fsood
| 723 | 29.166667 | 76 | sh |
null | OpenOOD-main/scripts/uncertainty/augmix/imagenet200_train_augmix.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/imagenet200_train_augmix.sh
# Train ResNet18-224 on ImageNet-200 with AugMix on 2 GPUs for 90 epochs.
python main.py \
    --config configs/datasets/imagenet200/imagenet200.yml \
    configs/networks/resnet18_224x224.yml \
    configs/pipelines/train/train_augmix.yml \
    configs/preprocessors/augmix_preprocessor.yml \
    --dataset.train.dataset_class ImglistAugMixDataset \
    --optimizer.num_epochs 90 \
    --dataset.train.batch_size 128 \
    --num_gpus 2 --num_workers 16 \
    --merge_option merge \
    --seed 0
| 495 | 32.066667 | 59 | sh |
null | OpenOOD-main/scripts/uncertainty/augmix/imagenet_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/augmix/imagenet_test_ood_msp.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# Standard OOD first, then full-spectrum OOD (--fsood) on the same ckpt.
# ood
python scripts/eval_ood_imagenet.py \
   --ckpt-path ./results/imagenet_resnet50_tvsv1_augmix_default/ckpt.pth \
   --arch resnet50 \
   --postprocessor msp \
   --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
   --ckpt-path ./results/imagenet_resnet50_tvsv1_augmix_default/ckpt.pth \
   --arch resnet50 \
   --postprocessor msp \
   --save-score --save-csv --fsood
| 687 | 27.666667 | 74 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/cutmix/cifar100_test_ood_msp.sh is run as:
# sh scripts/uncertainty/cutmix/cifar100_test_ood_msp.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
# FIX: the original `PYTHONPATH='.':$PYTHONPATH \` ended in a backslash that
# spliced the assignment onto the commented-out srun line below, so the
# variable was never exported to `python main.py`. Export it explicitly.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_resnet18_32x32_cutmix_e100_lr0.1_cutmix/best.ckpt' \
--mark cutmix
| 708 | 29.826087 | 91 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/cifar100_train_cutmix.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/cifar100_train_cutmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_cutmix.yml \
configs/preprocessors/base_preprocessor.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--trainer.trainer_args.cutmix_prob 0.5 \
--mark cutmix
| 621 | 24.916667 | 56 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/cifar10_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/cifar10_test_ood_msp.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_resnet18_32x32_cutmix_e100_lr0.1_cutmix/best.ckpt' \
--mark cutmix
| 702 | 29.565217 | 90 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/cifar10_train_cutmix.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/cifar10_train_cutmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_cutmix.yml \
configs/preprocessors/base_preprocessor.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--trainer.trainer_args.cutmix_prob 0.5 \
--mark cutmix
| 618 | 24.791667 | 55 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/mnist_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/mnist_test_ood_msp.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/mnist_lenet_cutmix_e100_lr0.1_cutmix/best.ckpt' \
--mark cutmix
| 672 | 28.26087 | 79 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/mnist_train_cutmix.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/mnist_train_cutmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/networks/lenet.yml \
configs/pipelines/train/train_cutmix.yml \
configs/preprocessors/base_preprocessor.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--trainer.trainer_args.cutmix_prob 0.5 \
--mark cutmix
| 603 | 24.166667 | 53 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/osr_mnist6_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/osr_mnist6_test_ood_msp.sh
GPU=1
CPU=1
node=73
jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/osr_mnist6_seed1_lenet_cutmix_e100_lr0.1_cutmix/best.ckpt' \
--mark cutmix
| 708 | 28.541667 | 90 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/osr_mnist6_train_cutmix.sh | #!/bin/bash
# sh scripts/uncertainty/cutmix/osr_mnist6_train_cutmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/networks/lenet.yml \
configs/pipelines/train/train_cutmix.yml \
configs/preprocessors/base_preprocessor.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--trainer.trainer_args.cutmix_prob 0.5 \
--mark cutmix
| 620 | 24.875 | 58 | sh |
null | OpenOOD-main/scripts/uncertainty/cutmix/sweep.py | # python scripts/uncertainty/cutmix/sweep.py
import os
# Each entry: [dataset config path (relative to configs/datasets/),
#              network config name (configs/networks/<name>.yml)].
config = [
    ['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32'],
    ['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32'],
    ['osr_tin20/tin20_seed1.yml', 'resnet18_64x64'],
    ['osr_mnist4/mnist4_seed1.yml', 'lenet'],
    ['mnist/mnist.yml', 'lenet'],
]
# Launch one slurm CutMix training job per config. The trailing '&' inside
# the command string backgrounds every job, so all runs start concurrently
# and this script exits without waiting for them.
for [dataset, network] in config:
    command = (f"PYTHONPATH='.':$PYTHONPATH \
        srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
        --cpus-per-task=1 --ntasks-per-node=1 \
        --kill-on-bad-exit=1 --job-name=openood \
        python main.py \
        --config configs/datasets/{dataset} \
        configs/networks/{network}.yml \
        configs/pipelines/train/train_cutmix.yml \
        configs/preprocessors/base_preprocessor.yml \
        --network.pretrained False \
        --trainer.trainer_args.cutmix_prob 0.5 \
        --optimizer.num_epochs 100 \
        --num_workers 8 &")
    os.system(command)
| 887 | 31.888889 | 56 | py |
null | OpenOOD-main/scripts/uncertainty/cutout/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/cutout/cifar100_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# Evaluate MSP post-hoc scores for the Cutout-trained CIFAR-100 model.
python scripts/eval_ood.py \
   --id-data cifar100 \
   --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_cutout-1-8 \
   --postprocessor msp \
   --save-score --save-csv
| 477 | 30.866667 | 72 | sh |
null | OpenOOD-main/scripts/uncertainty/cutout/cifar100_train_cutout.sh | #!/bin/bash
# sh scripts/uncertainty/cutout/cifar100_train_cutout.sh
# Train CIFAR-100 baseline with Cutout augmentation (one 8x8 hole).
python main.py \
    --config configs/datasets/cifar100/cifar100.yml \
    configs/networks/resnet18_32x32.yml \
    configs/pipelines/train/baseline.yml \
    configs/preprocessors/cutout_preprocessor.yml \
    --preprocessor.length 8 \
    --seed 0 \
    --mark cutout-1-8
| 345 | 27.833333 | 56 | sh |
null | OpenOOD-main/scripts/uncertainty/cutout/cifar10_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/cutout/cifar10_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# Evaluate MSP post-hoc scores for the Cutout-trained CIFAR-10 model.
python scripts/eval_ood.py \
   --id-data cifar10 \
   --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_cutout-1-16 \
   --postprocessor msp \
   --save-score --save-csv
| 475 | 30.733333 | 72 | sh |
null | OpenOOD-main/scripts/uncertainty/cutout/cifar10_train_cutout.sh | #!/bin/bash
# sh scripts/uncertainty/cutout/cifar10_train_cutout.sh
# Train CIFAR-10 baseline with Cutout (uses the preprocessor's default hole
# size; the run is marked cutout-1-16).
python main.py \
    --config configs/datasets/cifar10/cifar10.yml \
    configs/networks/resnet18_32x32.yml \
    configs/pipelines/train/baseline.yml \
    configs/preprocessors/cutout_preprocessor.yml \
    --seed 0 \
    --mark cutout-1-16
| 313 | 27.545455 | 55 | sh |
null | OpenOOD-main/scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/deepaugment/imagenet200_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# Standard OOD first, then full-spectrum OOD (--fsood) on the same model.
# ood
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_base_e30_lr0.1_deepaugment \
   --postprocessor msp \
   --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_base_e30_lr0.1_deepaugment \
   --postprocessor msp \
   --save-score --save-csv --fsood
| 732 | 29.541667 | 78 | sh |
null | OpenOOD-main/scripts/uncertainty/deepaugment/imagenet200_train_deepaugment.sh | #!/bin/bash
# sh scripts/uncertainty/deepaugment/imagenet200_train_deepaugment.sh
# the model sees three times the data as the baseline
# so only trains for 90/3=30 epochs
# Trains on a DeepAugment-expanded image list instead of the stock one.
python main.py \
    --config configs/datasets/imagenet200/imagenet200.yml \
    configs/networks/resnet18_224x224.yml \
    configs/pipelines/train/baseline.yml \
    configs/preprocessors/base_preprocessor.yml \
    --dataset.train.imglist_pth ./data/benchmark_imglist/imagenet200/train_imagenet200_deepaugment.txt \
    --optimizer.num_epochs 30 \
    --dataset.train.batch_size 128 \
    --num_gpus 2 --num_workers 16 \
    --merge_option merge \
    --seed 0 \
    --mark deepaugment
| 662 | 35.833333 | 104 | sh |
null | OpenOOD-main/scripts/uncertainty/deepaugment/imagenet_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/deepaugment/imagenet_test_ood_msp.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# Standard OOD first, then full-spectrum OOD (--fsood) on the same ckpt.
# ood
python scripts/eval_ood_imagenet.py \
   --ckpt-path ./results/imagenet_resnet50_tvsv1_base_deepaugment/ckpt.pth \
   --arch resnet50 \
   --postprocessor msp \
   --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
   --ckpt-path ./results/imagenet_resnet50_tvsv1_base_deepaugment/ckpt.pth \
   --arch resnet50 \
   --postprocessor msp \
   --save-score --save-csv --fsood
| 696 | 28.041667 | 76 | sh |
null | OpenOOD-main/scripts/uncertainty/ensemble/2_mnist_ensemble_train.sh | #!/bin/bash
# sh scripts/d_uncertainty/2_mnist_ensemble_train.sh
# for ensemble (mnist + lenet)
GPU=1
CPU=1
node=73
jobname=openood
# The backslash continuation makes PYTHONPATH an env-prefix of the srun
# command below (one logical line) — do not comment out srun without
# also turning this prefix into an `export`.
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
-w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/digits/mnist.yml \
configs/networks/lenet.yml \
configs/preprocessors/base_preprocessor.yml \
configs/pipelines/train/baseline.yml \
--optimizer.num_epochs 50 \
--num_workers 8 \
--output_dir ./results/lenet_ensemble_pretrained \
--exp_name network5
| 629 | 24.2 | 52 | sh |
null | OpenOOD-main/scripts/uncertainty/ensemble/cifar100_test_ood_ensemble.sh | #!/bin/bash
# sh scripts/uncertainty/ensemble/cifar100_test_ood_ensemble.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ensemble.yml \
--network.pretrained False \
--num_workers 8 \
--mark 0 \
--postprocessor.postprocessor_args.network_name resnet18_32x32 \
--postprocessor.postprocessor_args.checkpoint_root 'results/cifar100_resnet18_test_ensemble' \
--postprocessor.postprocessor_args.num_networks 5 \
--dataset.test.batch_size 64 \
--dataset.val.batch_size 64 \
--ood_dataset.batch_size 64
| 959 | 31 | 94 | sh |
null | OpenOOD-main/scripts/uncertainty/ensemble/cifar10_test_ood_ensemble.sh | #!/bin/bash
# sh scripts/uncertainty/ensemble/cifar10_test_ood_ensemble.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ensemble.yml \
--network.pretrained False \
--num_workers 8 \
--mark 0 \
--postprocessor.postprocessor_args.network_name resnet18_32x32 \
--postprocessor.postprocessor_args.checkpoint_root 'results/cifar10_resnet18_test_ensemble' \
--postprocessor.postprocessor_args.num_networks 5 \
--dataset.test.batch_size 64 \
--dataset.val.batch_size 64 \
--ood_dataset.batch_size 64
| 953 | 30.8 | 93 | sh |
null | OpenOOD-main/scripts/uncertainty/ensemble/mnist_ensemble_test.sh | #!/bin/bash
# sh scripts/uncertainty/ensemble/mnist_ensemble_test.sh
#GPU=1
#CPU=1
#node=73
#jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ensemble.yml \
--network.pretrained False \
--num_workers 8 \
--mark 0 \
--postprocessor.postprocessor_args.network_name lenet \
--postprocessor.postprocessor_args.checkpoint_root 'results/mnist_lenet_test_ensemble' \
--postprocessor.postprocessor_args.num_networks 5 \
--dataset.test.batch_size 64 \
--dataset.val.batch_size 64 \
--ood_dataset.batch_size 64
| 908 | 30.344828 | 88 | sh |
null | OpenOOD-main/scripts/uncertainty/ensemble/osr_mnist_test_ood_ensemble.sh | #!/bin/bash
# sh scripts/uncertainty/ensemble/osr_mnist_test_ood_ensemble.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ensemble.yml \
--network.pretrained False \
--num_workers 8 \
--mark 0 \
--postprocessor.postprocessor_args.network_name lenet \
--postprocessor.postprocessor_args.checkpoint_root 'results/_osr_mnist6_test_ensemble' \
--postprocessor.postprocessor_args.num_networks 5 \
--dataset.test.batch_size 64 \
--dataset.val.batch_size 64 \
--ood_dataset.batch_size 64
| 948 | 30.633333 | 88 | sh |
null | OpenOOD-main/scripts/uncertainty/ensemble/osr_test_ood_ensemble.sh | #!/bin/bash
# sh scripts/uncertainty/ensemble/osr_test_ood_ensemble.sh
GPU=1
CPU=1
node=73
jobname=openood
# The backslash continuation makes PYTHONPATH an env-prefix of the srun
# command below (one logical line) — do not comment out srun without
# also turning this prefix into an `export`.
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} \
python main.py \
--config configs/datasets/osr_tin20/tin20_seed1.yml \
configs/datasets/osr_tin20/tin20_seed1_ood.yml \
configs/networks/resnet18_64x64.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/ensemble.yml \
--network.pretrained False \
--num_workers 8 \
--mark 0 \
--postprocessor.postprocessor_args.network_name resnet18_64x64 \
--postprocessor.postprocessor_args.checkpoint_root 'results/osr_tin20_seed1' \
--postprocessor.postprocessor_args.num_networks 5 \
--dataset.test.batch_size 64 \
--dataset.val.batch_size 64 \
--ood_dataset.batch_size 64
| 904 | 30.206897 | 78 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/cifar100_test_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/cifar100_test_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/dropout_net.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dropout.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_dropout_net_base_e100_lr0.1_default/best.ckpt' \
--mark 0
| 716 | 26.576923 | 87 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/cifar100_train_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/cifar100_train_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/dropout_net.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/base_preprocessor.yml \
--network.backbone.name resnet18_32x32 \
--network.backbone.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 645 | 24.84 | 64 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/cifar10_test_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/cifar10_test_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: export PYTHONPATH instead of the dangling `PYTHONPATH=... \` prefix,
# whose backslash spliced onto the commented-out srun line and left the
# variable unexported for `python main.py`.
export PYTHONPATH=".${PYTHONPATH:+:$PYTHONPATH}"
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/dropout_net.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dropout.yml \
--evaluator.name ood \
--num_workers 8 \
--network.checkpoint 'results/cifar10_dropout_net_base_e100_lr0.1_default/best.ckpt' \
--mark 0