Search is not available for this dataset
repo stringlengths 2 152 ⌀ | file stringlengths 15 239 | code stringlengths 0 58.4M | file_length int64 0 58.4M | avg_line_length float64 0 1.81M | max_line_length int64 0 12.7M | extension_type stringclasses 364 values |
|---|---|---|---|---|---|---|
null | OpenOOD-main/scripts/uncertainty/mc_dropout/cifar10_train_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/cifar10_train_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/dropout_net.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/base_preprocessor.yml \
--network.backbone.name resnet18_32x32 \
--network.backbone.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 642 | 24.72 | 63 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/mnist_test_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/mnist_test_mc_dropout.sh
#GPU=1
#CPU=1
#node=73
#jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/dropout_net.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dropout.yml \
--network.backbone.name lenet \
--num_workers 8 \
--network.checkpoint 'results/mnist_dropout_net_base_e100_lr0.1_default/best.ckpt' \
--mark 0
| 717 | 28.916667 | 84 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/mnist_train_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/mnist_train_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/networks/dropout_net.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/base_preprocessor.yml \
--network.backbone.name lenet \
--network.backbone.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 627 | 24.12 | 61 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/osr_mnist6_test_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/osr_mnist6_test_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/dropout_net.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dropout.yml \
--network.backbone.name lenet \
--num_workers 8 \
--network.checkpoint 'results/osr_mnist6_seed1_dropout_net_base_e100_lr0.1_default/best.ckpt' \
--mark 0
| 770 | 27.555556 | 95 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/osr_mnist6_train_mc_dropout.sh | #!/bin/bash
# sh scripts/uncertainty/mc_dropout/osr_mnist6_train_mc_dropout.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/networks/dropout_net.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/base_preprocessor.yml \
--network.backbone.name lenet \
--network.backbone.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 644 | 24.8 | 66 | sh |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/sweep.py | # python scripts/uncertainty/mc_dropout/sweep.py
import os
config = [
['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32'],
['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32'],
['osr_tin20/tin20_seed1.yml', 'resnet18_64x64'],
['osr_mnist4/mnist4_seed1.yml', 'lenet'],
['mnist/mnist.yml', 'lenet'],
]
for [dataset, network] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/networks/dropout_net.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/base_preprocessor.yml \
--network.backbone.name {network} \
--network.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8 &")
os.system(command)
| 884 | 31.777778 | 56 | py |
null | OpenOOD-main/scripts/uncertainty/mc_dropout/sweep_test.py | # python scripts/uncertainty/mc_dropout/sweep_test.py
import os
config = [
[
'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml',
'resnet18_32x32',
'osr_cifar6_seed1_dropout_net_base_e100_lr0.1_default'
],
[
'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
'resnet18_32x32',
'osr_cifar50_seed1_dropout_net_base_e100_lr0.1_default'
],
[
'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml',
'resnet18_64x64', 'osr_tin20_seed1_dropout_net_base_e100_lr0.1_default'
],
[
'osr_mnist4/mnist4_seed1.yml', 'osr_mnist4/mnist4_seed1_ood.yml',
'lenet', 'osr_mnist4_seed1_dropout_net_base_e100_lr0.1_default'
],
]
for [dataset, ood_data, network, pth] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/datasets/{ood_data} \
configs/networks/dropout_net.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/dropout.yml \
--num_workers 8 \
--network.checkpoint 'results/{pth}/best.ckpt' \
--mark 0 \
--merge_option merge &")
os.system(command)
| 1,401 | 32.380952 | 79 | py |
null | OpenOOD-main/scripts/uncertainty/mixup/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/cifar100_test_ood_msp.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_resnet18_32x32_mixup_e100_lr0.1_alpha0.2/best.ckpt' \
--mark mixup
| 707 | 29.782609 | 92 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/cifar100_train_mixup.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/cifar100_train_mixup.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_mixup.yml \
configs/preprocessors/base_preprocessor.yml \
--network.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8 | 580 | 26.666667 | 54 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/cifar10_test_ood_mixup.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/cifar10_test_ood_mixup.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_resnet18_32x32_mixup_e100_lr0.1_alpha0.2_default/best.ckpt' \
--mark mixup
| 711 | 29.956522 | 99 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/cifar10_train_mixup.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/cifar10_train_mixup.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_mixup.yml \
configs/preprocessors/base_preprocessor.yml \
--network.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 578 | 25.318182 | 53 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/mnist_test_ood_mixup.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/mnist_test_ood_mixup.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/mnist_lenet_mixup_e100_lr0.1_alpha0.2_default/best.ckpt' \
--mark mixup
| 681 | 28.652174 | 88 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/mnist_train_mixup.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/mnist_train_mixup.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/networks/lenet.yml \
configs/pipelines/train/train_mixup.yml \
configs/preprocessors/base_preprocessor.yml \
--network.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 563 | 24.636364 | 51 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/osr_mnist6_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/osr_mnist6_test_ood_msp.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/osr_mnist6_seed1_lenet_mixup_e100_lr0.1_alpha0.2_default/best.ckpt' \
--mark mixup
| 715 | 28.833333 | 99 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/osr_mnist6_train_mixup.sh | #!/bin/bash
# sh scripts/uncertainty/mixup/osr_mnist6_train_mixup.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/networks/lenet.yml \
configs/pipelines/train/train_mixup.yml \
configs/preprocessors/base_preprocessor.yml \
--network.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8
| 580 | 25.409091 | 56 | sh |
null | OpenOOD-main/scripts/uncertainty/mixup/sweep.py | # python scripts/uncertainty/mixup/sweep.py
import os
config = [
['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32'],
['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32'],
['osr_tin20/tin20_seed1.yml', 'resnet18_64x64'],
['osr_mnist4/mnist4_seed1.yml', 'lenet'],
['mnist/mnist.yml', 'lenet'],
]
for [dataset, network] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
-w SG-IDC1-10-51-2-75 \
python main.py \
--config configs/datasets/{dataset} \
configs/networks/{network}.yml \
configs/pipelines/train/train_mixup.yml \
configs/preprocessors/base_preprocessor.yml \
--network.pretrained False \
--optimizer.num_epochs 100 \
--num_workers 8 &")
os.system(command)
| 868 | 31.185185 | 56 | py |
null | OpenOOD-main/scripts/uncertainty/mixup/sweep_test.py | # python scripts/uncertainty/mixup/sweep_test.py
import os
config = [
[
'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml',
'resnet18_32x32',
'./results/cifar10_osr_resnet18_32x32_base_e100_lr0.1_default/best_epoch94_acc0.9773.ckpt'
],
[
'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
'resnet18_32x32',
'./results/cifar100_osr_resnet18_32x32_base_e100_lr0.1_default/best.ckpt'
],
]
for [dataset, ood_dataset, network, pth] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/datasets/{ood_dataset} \
configs/networks/{network}.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--network.pretrained True \
--network.checkpoint {pth} \
--num_workers 8 \
--merge_option merge &")
os.system(command)
| 1,125 | 32.117647 | 98 | py |
null | OpenOOD-main/scripts/uncertainty/pixmix/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/cifar100_test_ood_msp.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/datasets/cifar100/cifar100_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_pixmix/s0/best.ckpt' \
--mark pixmix
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_pixmix \
--postprocessor msp \
--save-score --save-csv
| 1,149 | 31.857143 | 96 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/cifar100_train_pixmix.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/cifar100_train_pixmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/pixmix_preprocessor.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--mark pixmix \
--seed 0
| 621 | 24.916667 | 56 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/cifar10_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/cifar10_test_ood_msp.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/datasets/cifar10/cifar10_ood.yml \
configs/networks/resnet18_32x32.yml \
configs/preprocessors/base_preprocessor.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_pixmix/s0/best.ckpt' \
--mark pixmix
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_pixmix \
--postprocessor msp \
--save-score --save-csv
| 1,191 | 32.111111 | 95 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/cifar10_train_pixmix.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/cifar10_train_pixmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/pixmix_preprocessor.yml \
--num_workers 8 \
--optimizer.num_epochs 100 \
--mark pixmix \
--seed 0
| 618 | 24.791667 | 55 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/imagenet200_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/imagenet200_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_pixmix \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_pixmix \
--postprocessor msp \
--save-score --save-csv --fsood
| 717 | 28.916667 | 73 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/imagenet200_train_pixmix.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/imagenet200_train_pixmix.sh
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/networks/resnet18_224x224.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/pixmix_preprocessor.yml \
--preprocessor.preprocessor_args.aug_severity 1 \
--preprocessor.preprocessor_args.beta 4 \
--optimizer.num_epochs 90 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed ${SEED} \
--mark pixmix
| 560 | 32 | 59 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/imagenet_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/imagenet_test_ood_msp.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_resnet50_tvsv1_base_pixmix/ckpt.pth \
--arch resnet50 \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_resnet50_tvsv1_base_pixmix/ckpt.pth \
--arch resnet50 \
--postprocessor msp \
--save-score --save-csv --fsood
| 681 | 27.416667 | 71 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/mnist_test_ood_pixmix.sh | !/bin/bash
# sh scripts/uncertainty/pixmix/mnist_test_ood_pixmix.sh
# GPU=1
# CPU=1
# node=36
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/datasets/mnist/mnist_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_ood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/mnist_lenet_base_e100_lr0.1_pixmix/best.ckpt' \
--mark pixmix
| 672 | 28.26087 | 77 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/mnist_train_pixmix.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/mnist_train_pixmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
#python main.py \
python main.py \
--config configs/datasets/mnist/mnist.yml \
configs/networks/lenet.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/pixmix_preprocessor.yml \
--num_workers 0 \
--optimizer.num_epochs 100 \
--mark pixmix
| 578 | 23.125 | 53 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/osr_mnist6_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/osr_mnist6_test_ood_msp.sh
GPU=1
CPU=1
node=73
jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
configs/networks/lenet.yml \
configs/pipelines/test/test_osr.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/msp.yml \
--num_workers 8 \
--network.checkpoint 'results/osr_mnist6_seed1_lenet_base_e100_lr0.1_pixmix/best.ckpt' \
--mark osr_mnist6_pixmix
| 717 | 28.916667 | 88 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/osr_mnist6_train_pixmix.sh | #!/bin/bash
# sh scripts/uncertainty/pixmix/osr_mnist6_train_pixmix.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
CUDA_VISIBLE_DEVICES=1 python main.py \
--config configs/datasets/osr_mnist6/mnist6_seed1.yml \
configs/networks/lenet.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/pixmix_preprocessor.yml \
--dataset.train.batch_size 4096 \
--num_workers 0 \
--optimizer.num_epochs 100 \
--mark pixmix \
--merge_option merge
| 657 | 25.32 | 58 | sh |
null | OpenOOD-main/scripts/uncertainty/pixmix/sweep.py | # python scripts/uncertainty/pixmix/sweep.py
import os
config = [
['osr_cifar6/cifar6_seed1.yml', 'resnet18_32x32', 'cifar10'],
['osr_cifar50/cifar50_seed1.yml', 'resnet18_32x32', 'cifar100'],
['osr_tin20/tin20_seed1.yml', 'resnet18_64x64', 'tin'],
]
for [dataset, network, od] in config:
command = (f"PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1 \
--cpus-per-task=1 --ntasks-per-node=1 \
--kill-on-bad-exit=1 --job-name=openood \
python main.py \
--config configs/datasets/{dataset} \
configs/networks/{network}.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/pixmix_preprocessor.yml \
--optimizer.num_epochs 100 \
--dataset.name {od}_osr \
--num_workers 8")
os.system(command)
| 789 | 31.916667 | 68 | py |
null | OpenOOD-main/scripts/uncertainty/randaugment/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/cifar100_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_randaugment-1-14 \
--postprocessor msp \
--save-score --save-csv
| 488 | 31.6 | 78 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/cifar100_train_randaugment.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/cifar100_train_randaugment.sh
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/randaugment_preprocessor.yml \
--seed 0 \
--mark randaugment-1-14
| 336 | 29.636364 | 66 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/cifar10_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/cifar10_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_randaugment-1-14 \
--postprocessor msp \
--save-score --save-csv
| 485 | 31.4 | 77 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/cifar10_train_randaugment.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/cifar10_train_randaugment.sh
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/randaugment_preprocessor.yml \
--seed 0 \
--mark randaugment-1-14
| 333 | 29.363636 | 65 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/imagenet200_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/imagenet200_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_randaugment-1-10 \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_randaugment-1-10 \
--postprocessor msp \
--save-score --save-csv --fsood
| 742 | 29.958333 | 83 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/imagenet200_train_randaugment.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/imagenet200_train_randaugment.sh
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/networks/resnet18_224x224.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/randaugment_preprocessor.yml \
--preprocessor.m 10 \
--optimizer.num_epochs 90 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0 \
--mark randaugment-1-10
| 505 | 30.625 | 69 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/imagenet_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/imagenet_test_ood_msp.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_resnet50_base_e30_lr0.001_randaugment-2-9/s0/best.ckpt \
--arch resnet50 \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_resnet50_base_e30_lr0.001_randaugment-2-9/s0/best.ckpt \
--arch resnet50 \
--postprocessor msp \
--save-score --save-csv --fsood
| 724 | 29.208333 | 90 | sh |
null | OpenOOD-main/scripts/uncertainty/randaugment/imagenet_train_randaugment.sh | #!/bin/bash
# sh scripts/uncertainty/randaugment/imagenet_train_randaugment.sh
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/networks/resnet50.yml \
configs/pipelines/train/baseline.yml \
configs/preprocessors/randaugment_preprocessor.yml \
--preprocessor.n 2 \
--preprocessor.m 9 \
--network.pretrained True \
--network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \
--optimizer.lr 0.001 \
--optimizer.num_epochs 30 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0 \
--mark randaugment-2-9
| 653 | 31.7 | 82 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/cifar100_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/cifar100_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar100 \
--root ./results/cifar100_resnet18_32x32_regmixup_e100_lr0.1_alpha10_default \
--postprocessor msp \
--save-score --save-csv
| 488 | 31.6 | 81 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/cifar100_train_regmixup.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/cifar100_train_regmixup.sh
python main.py \
--config configs/datasets/cifar100/cifar100.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_regmixup.yml \
configs/preprocessors/base_preprocessor.yml \
--trainer.trainer_args.alpha 10 \
--seed 0
| 337 | 29.727273 | 60 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/cifar10_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/cifar10_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
--id-data cifar10 \
--root ./results/cifar10_resnet18_32x32_regmixup_e100_lr0.1_alpha20_default \
--postprocessor msp \
--save-score --save-csv
| 485 | 31.4 | 80 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/cifar10_train_regmixup.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/cifar10_train_regmixup.sh
python main.py \
--config configs/datasets/cifar10/cifar10.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/train/train_regmixup.yml \
configs/preprocessors/base_preprocessor.yml \
--trainer.trainer_args.alpha 20 \
--seed 0
| 334 | 29.454545 | 59 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/imagenet200_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/imagenet200_test_ood_msp.sh
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_regmixup_e90_lr0.1_alpha10_default \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
--id-data imagenet200 \
--root ./results/imagenet200_resnet18_224x224_regmixup_e90_lr0.1_alpha10_default \
--postprocessor msp \
--save-score --save-csv --fsood
| 745 | 30.083333 | 86 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/imagenet200_train_regmixup.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/imagenet200_train_regmixup.sh
python main.py \
--config configs/datasets/imagenet200/imagenet200.yml \
configs/networks/resnet18_224x224.yml \
configs/pipelines/train/train_regmixup.yml \
configs/preprocessors/base_preprocessor.yml \
--trainer.trainer_args.alpha 10 \
--optimizer.num_epochs 90 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0
| 480 | 31.066667 | 63 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/imagenet_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/imagenet_test_ood_msp.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt \
--arch resnet50 \
--postprocessor msp \
--save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
--ckpt-path ./results/imagenet_resnet50_regmixup_e30_lr0.001_alpha10_default/s0/best.ckpt \
--arch resnet50 \
--postprocessor msp \
--save-score --save-csv --fsood
| 729 | 29.416667 | 94 | sh |
null | OpenOOD-main/scripts/uncertainty/regmixup/imagenet_train_regmixup.sh | #!/bin/bash
# sh scripts/uncertainty/regmixup/imagenet_train_regmixup.sh
python main.py \
--config configs/datasets/imagenet/imagenet.yml \
configs/networks/resnet50.yml \
configs/pipelines/train/train_regmixup.yml \
configs/preprocessors/base_preprocessor.yml \
--trainer.trainer_args.alpha 10 \
--network.pretrained True \
--network.checkpoint ./results/pretrained_weights/resnet50_imagenet1k_v1.pth \
--optimizer.lr 0.001 \
--optimizer.num_epochs 30 \
--dataset.train.batch_size 128 \
--num_gpus 2 --num_workers 16 \
--merge_option merge \
--seed 0
| 605 | 32.666667 | 82 | sh |
null | OpenOOD-main/scripts/uncertainty/rts/cifar100_test_ood_rts.sh | #!/bin/bash
# sh scripts/uncertainty/rts/cifar100_test_ood_rts.sh
# (header previously pointed at a non-existent cifar100_test_rts_msp.sh)
# GPU=1
# CPU=1
# node=36
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
# OOD evaluation of the RTS-trained ResNet-18 on CIFAR-100.
# FIX: removed the dangling line continuation after the last argument, which
# would silently swallow any line appended below it.
python main.py \
    --config configs/datasets/cifar100/cifar100.yml \
    configs/datasets/cifar100/cifar100_ood.yml \
    configs/networks/rts_net.yml \
    configs/pipelines/test/test_ood.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/rts.yml \
    --network.backbone.name resnet18_32x32 \
    --num_workers 8 \
    --network.checkpoint 'results/cifar100_rts_net_rts_e100_lr0.1_default/best_epoch89_acc0.7850.ckpt'
| 736 | 31.043478 | 100 | sh |
null | OpenOOD-main/scripts/uncertainty/rts/cifar100_train_rts.sh | #!/bin/bash
# sh scripts/uncertainty/rts/cifar100_train_rts.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: the assignment used to end with a trailing backslash that continued
# onto the commented-out srun line, so PYTHONPATH was never applied to the
# python process below; export it explicitly instead.
export PYTHONPATH='.':$PYTHONPATH
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} \
# -w SG-IDC1-10-51-2-${node} \
# Train an RTS ResNet-18 on CIFAR-100 for 100 epochs.
python main.py \
    --config configs/datasets/cifar100/cifar100.yml \
    configs/networks/rts_net.yml \
    configs/pipelines/train/baseline.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/rts.yml \
    --network.backbone.name resnet18_32x32 \
    --num_workers 8 \
    --trainer.name rts \
    --optimizer.num_epochs 100
| 643 | 25.833333 | 51 | sh |
null | OpenOOD-main/scripts/uncertainty/styleaug/imagenet200_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/styleaug/imagenet200_test_ood_msp.sh
# (header previously pointed at the deepaugment directory by mistake)
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
# Evaluate the style-augmented ImageNet-200 model with MSP.
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_base_e45_lr0.1_stylized \
   --postprocessor msp \
   --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_base_e45_lr0.1_stylized \
   --postprocessor msp \
   --save-score --save-csv --fsood
| 726 | 29.291667 | 75 | sh |
null | OpenOOD-main/scripts/uncertainty/styleaug/imagenet200_train_styleaug.sh | #!/bin/bash
# sh scripts/uncertainty/styleaug/imagenet200_train_styleaug.sh
# the model sees twice the data as the baseline
# so only trains for 90/2=45 epochs
# Trains on the stylized image list; --mark tags the run name so results
# land in a distinct output folder.
python main.py \
    --config configs/datasets/imagenet200/imagenet200.yml \
    configs/networks/resnet18_224x224.yml \
    configs/pipelines/train/baseline.yml \
    configs/preprocessors/base_preprocessor.yml \
    --dataset.train.imglist_pth ./data/benchmark_imglist/imagenet200/train_imagenet200_stylized.txt \
    --optimizer.num_epochs 45 \
    --dataset.train.batch_size 128 \
    --num_gpus 2 --num_workers 16 \
    --merge_option merge \
    --seed 0 \
    --mark stylized
| 644 | 34.833333 | 101 | sh |
null | OpenOOD-main/scripts/uncertainty/styleaug/imagenet_test_ood_msp.sh | #!/bin/bash
# sh scripts/uncertainty/styleaug/imagenet_test_ood_msp.sh
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50
# ood
# Standard OOD evaluation of the stylized-ImageNet ResNet-50 with MSP.
python scripts/eval_ood_imagenet.py \
   --ckpt-path ./results/imagenet_resnet50_tvsv1_base_stylized/ckpt.pth \
   --arch resnet50 \
   --postprocessor msp \
   --save-score --save-csv #--fsood
# full-spectrum ood
# Same checkpoint, evaluated in full-spectrum OOD mode.
python scripts/eval_ood_imagenet.py \
   --ckpt-path ./results/imagenet_resnet50_tvsv1_base_stylized/ckpt.pth \
   --arch resnet50 \
   --postprocessor msp \
   --save-score --save-csv --fsood
| 687 | 27.666667 | 73 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/0_tempscaling.sh | #!/bin/bash
# sh scripts/d_uncertainty/0_tempscaling.sh
# Temperature-scaling FSOOD tests. The mnist block below is kept for
# reference but is fully commented out; only the cifar10 block runs.
# mnist
# GPU=1
# CPU=1
# node=73
# jobname=openood
# PYTHONPATH='.':$PYTHONPATH \
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
# python main.py \
# --config configs/datasets/digits/mnist.yml \
# configs/datasets/digits/mnist_fsood.yml \
# configs/networks/lenet.yml \
# configs/pipelines/test/test_fsood.yml \
# configs/postprocessors/temperature_scaling.yml \
# configs/preprocessors/base_preprocessor.yml \
# --num_workers 8 \
# --network.checkpoint ./results/mnist_lenet_base_e100_lr0.1/best_epoch86_acc0.9920.ckpt \
# --mark 0 \
# --exp_name mnist_lenet_base_e100_lr0.1_test_fsood_temperature_scaling
# cifar10
GPU=1
CPU=1
node=73
jobname=openood
# The trailing backslashes chain PYTHONPATH, srun and python into a single
# command, so the env prefix applies to the whole srun invocation.
PYTHONPATH='.':$PYTHONPATH \
srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
--config configs/datasets/objects/cifar10.yml \
configs/datasets/objects/cifar10_fsood.yml \
configs/networks/resnet18_32x32.yml \
configs/pipelines/test/test_fsood.yml \
configs/preprocessors/base_preprocessor.yml \
configs/postprocessors/temperature_scaling.yml \
--num_workers 8 \
--mark 0 \
--network.checkpoint ./results/cifar10_resnet18_32x32_base_e100_lr0.1/best.ckpt \
--exp_name cifar10_resnet18_32x32_base_e100_lr0.1_test_fsood_temperature_scaling
| 1,518 | 30.645833 | 90 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/cifar100_test_ood_tempscaling.sh | #!/bin/bash
# sh scripts/uncertainty/temp_scaling/cifar100_test_ood_tempscaling.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: the assignment used to end with a trailing backslash that continued
# onto the commented-out srun line, so PYTHONPATH was never applied to the
# python process below; export it explicitly instead.
export PYTHONPATH='.':$PYTHONPATH
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
    --config configs/datasets/cifar100/cifar100.yml \
    configs/datasets/cifar100/cifar100_ood.yml \
    configs/networks/resnet18_32x32.yml \
    configs/pipelines/test/test_ood.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/temp_scaling.yml \
    --num_workers 8 \
    --network.checkpoint 'results/cifar100_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
    --mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
    --id-data cifar100 \
    --root ./results/cifar100_resnet18_32x32_base_e100_lr0.1_default \
    --postprocessor temp_scaling \
    --save-score --save-csv
| 1,182 | 31.861111 | 97 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/cifar10_test_ood_tempscaling.sh | #!/bin/bash
# sh scripts/uncertainty/temp_scaling/cifar10_test_ood_tempscaling.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: the assignment used to end with a trailing backslash that continued
# onto the commented-out srun line, so PYTHONPATH was never applied to the
# python process below; export it explicitly instead.
export PYTHONPATH='.':$PYTHONPATH
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
    --config configs/datasets/cifar10/cifar10.yml \
    configs/datasets/cifar10/cifar10_ood.yml \
    configs/networks/resnet18_32x32.yml \
    configs/pipelines/test/test_ood.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/temp_scaling.yml \
    --num_workers 8 \
    --network.checkpoint 'results/cifar10_resnet18_32x32_base_e100_lr0.1_default/s0/best.ckpt' \
    --mark 0
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
python scripts/eval_ood.py \
    --id-data cifar10 \
    --root ./results/cifar10_resnet18_32x32_base_e100_lr0.1_default \
    --postprocessor temp_scaling \
    --save-score --save-csv
| 1,174 | 31.638889 | 96 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/imagenet200_test_ood_tempscaling.sh | #!/bin/bash
# sh scripts/uncertainty/temp_scaling/imagenet200_test_ood_tempscaling.sh
# (header previously pointed at scripts/ood/, the file lives under
# scripts/uncertainty/)
############################################
# alternatively, we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood.py
# especially if you want to get results from
# multiple runs
# ood
# Standard OOD evaluation with temperature scaling over all runs in --root.
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
   --postprocessor temp_scaling \
   --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood.py \
   --id-data imagenet200 \
   --root ./results/imagenet200_resnet18_224x224_base_e90_lr0.1_default \
   --postprocessor temp_scaling \
   --save-score --save-csv --fsood
| 743 | 30 | 74 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/imagenet_test_ood_tempscaling.sh | #!/bin/bash
# sh scripts/uncertainty/temp_scaling/imagenet_test_ood_tempscaling.sh
GPU=1
CPU=1
node=73
jobname=openood
# FIX: the assignment used to end with a trailing backslash that continued
# onto the commented-out srun line, so PYTHONPATH was never applied to the
# python process below; export it explicitly instead.
export PYTHONPATH='.':$PYTHONPATH
#srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
#--cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
#--kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
    --config configs/datasets/imagenet/imagenet.yml \
    configs/datasets/imagenet/imagenet_ood.yml \
    configs/networks/resnet50.yml \
    configs/pipelines/test/test_ood.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/temp_scaling.yml \
    --num_workers 10 \
    --ood_dataset.image_size 256 \
    --dataset.test.batch_size 256 \
    --dataset.val.batch_size 256 \
    --network.pretrained True \
    --network.checkpoint 'results/pretrained_weights/resnet50_imagenet1k_v1.pth' \
    --merge_option merge
############################################
# we recommend using the
# new unified, easy-to-use evaluator with
# the example script scripts/eval_ood_imagenet.py
# available architectures:
# resnet50, swin-t, vit-b-16
# ood
python scripts/eval_ood_imagenet.py \
    --tvs-pretrained \
    --arch resnet50 \
    --postprocessor temp_scaling \
    --save-score --save-csv #--fsood
# full-spectrum ood
python scripts/eval_ood_imagenet.py \
    --tvs-pretrained \
    --arch resnet50 \
    --postprocessor temp_scaling \
    --save-score --save-csv --fsood
| 1,427 | 28.142857 | 82 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/mnist_test_ood_tempscaling.sh | #!/bin/bash
# sh scripts/uncertainty/temp_scaling/mnist_test_ood_tempscaling.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: the assignment used to end with a trailing backslash that continued
# onto the commented-out srun line, so PYTHONPATH was never applied to the
# python process below; export it explicitly instead.
export PYTHONPATH='.':$PYTHONPATH
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
    --config configs/datasets/mnist/mnist.yml \
    configs/datasets/mnist/mnist_ood.yml \
    configs/networks/lenet.yml \
    configs/pipelines/test/test_ood.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/temp_scaling.yml \
    --num_workers 8 \
    --network.checkpoint 'results/checkpoints/mnist_lenet_acc99.30.ckpt' \
    --mark 0
| 685 | 27.583333 | 73 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/osr_mnist6_test_ood_tempscaling.sh | #!/bin/bash
# sh scripts/uncertainty/temp_scaling/osr_mnist6_test_ood_tempscaling.sh
# GPU=1
# CPU=1
# node=73
# jobname=openood
# FIX: the assignment used to end with a trailing backslash that continued
# onto the commented-out srun line, so PYTHONPATH was never applied to the
# python process below; export it explicitly instead.
export PYTHONPATH='.':$PYTHONPATH
# srun -p dsta --mpi=pmi2 --gres=gpu:${GPU} -n1 \
# --cpus-per-task=${CPU} --ntasks-per-node=${GPU} \
# --kill-on-bad-exit=1 --job-name=${jobname} -w SG-IDC1-10-51-2-${node} \
python main.py \
    --config configs/datasets/osr_mnist6/mnist6_seed1.yml \
    configs/datasets/osr_mnist6/mnist6_seed1_ood.yml \
    configs/networks/lenet.yml \
    configs/pipelines/test/test_osr.yml \
    configs/preprocessors/base_preprocessor.yml \
    configs/postprocessors/temp_scaling.yml \
    --num_workers 8 \
    --network.checkpoint 'results/checkpoints/osr/mnist6_seed1.ckpt' \
    --mark 0
| 710 | 28.625 | 73 | sh |
null | OpenOOD-main/scripts/uncertainty/temp_scaling/sweep_osr.py | # python scripts/uncertainty/temp_scaling/sweep_osr.py
import os
# Each entry: (ID dataset config, OOD dataset config, backbone, checkpoint).
config = [
    [
        'osr_cifar6/cifar6_seed1.yml', 'osr_cifar6/cifar6_seed1_ood.yml',
        'resnet18_32x32', 'results/checkpoints/osr/cifar6_seed1.ckpt'
    ],
    [
        'osr_cifar50/cifar50_seed1.yml', 'osr_cifar50/cifar50_seed1_ood.yml',
        'resnet18_32x32', 'results/checkpoints/osr/cifar50_seed1.ckpt'
    ],
    [
        'osr_tin20/tin20_seed1.yml', 'osr_tin20/tin20_seed1_ood.yml',
        'resnet18_64x64', 'results/checkpoints/osr/tin20_seed1.ckpt'
    ],
    [
        'osr_mnist6/mnist6_seed1.yml', 'osr_mnist6/mnist6_seed1_ood.yml',
        'lenet', 'results/checkpoints/osr/mnist6_seed1.ckpt'
    ],
]
# Launch one background srun job per OSR benchmark. The command is assembled
# from single-quoted pieces instead of an f-string full of backslash-newline
# continuations, which used to inject long runs of literal whitespace.
for dataset, ood_dataset, network, pth in config:
    command = ' '.join([
        "PYTHONPATH='.':$PYTHONPATH",
        'srun -p dsta --mpi=pmi2 --gres=gpu:1 -n1',
        '--cpus-per-task=1 --ntasks-per-node=1',
        '--kill-on-bad-exit=1 --job-name=openood',
        'python main.py',
        f'--config configs/datasets/{dataset}',
        f'configs/datasets/{ood_dataset}',
        f'configs/networks/{network}.yml',
        'configs/pipelines/test/test_osr.yml',
        'configs/preprocessors/base_preprocessor.yml',
        'configs/postprocessors/temp_scaling.yml',
        f'--network.checkpoint {pth}',
        '--num_workers 8',
        # Trailing '&' backgrounds the job so all four launch concurrently.
        '--merge_option merge &',
    ])
    os.system(command)
| 1,316 | 32.769231 | 77 | py |
null | OpenOOD-main/tools/plot/tsne_tools.py | # srun -p dsta --mpi=pmi2 --cpus-per-task=1
# --kill-on-bad-exit=1 --job-name=tsne -w SG-IDC1-10-51-2-73
# python compute_tsne.py
import os
import time
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
def l2_normalize(x):
    """Return `x` with each row scaled to unit Euclidean (L2) norm.

    Replaces a lambda assignment (PEP 8 E731) with a proper function.
    Expects a 2-D array; rows with zero norm produce NaN/inf, as before.
    """
    return x / np.linalg.norm(x, axis=1, keepdims=True)
def tsne_compute(x, n_components=50):
    """Project `x` (n_samples, n_features) to 2-D with t-SNE.

    When the feature dimension exceeds `n_components`, PCA first reduces it
    to `n_components` dimensions to keep the t-SNE run tractable. Prints the
    wall-clock duration and returns the (n_samples, 2) embedding.
    """
    start_time = time.time()
    if n_components < x.shape[1]:
        # BUG FIX: the PCA step previously hard-coded n_components=50 and
        # silently ignored the function's `n_components` argument.
        pca = PCA(n_components=n_components)
        x = pca.fit_transform(x)
    tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=2000)
    tsne_pos = tsne.fit_transform(x)
    hours, rem = divmod(time.time() - start_time, 3600)
    minutes, seconds = divmod(rem, 60)
    print('TSNE Computation Duration: {:0>2}:{:0>2}:{:05.2f}'.format(
        int(hours), int(minutes), seconds),
          flush=True)
    return tsne_pos
dataset_list = [
    'mnist', 'usps', 'svhn', 'notmnist', 'fashionmnist', 'texture', 'cifar10',
    'tin'
]
# Per-dataset feature dumps are expected at <dirname>/<dataset>.npz.
dirname = '/mnt/lustre/jkyang/FSOOD22/report/test/test_tsne'
# Fraction of samples drawn (without replacement) from each dataset.
sample_rate = 0.1
highfeat_list, featstat_list, idx_list = [], [], []
for idx, dataset in enumerate(dataset_list):
    file_name = os.path.join(dirname, f'{dataset}.npz')
    # Load once instead of re-reading the archive for every key.
    npz = np.load(file_name)
    highfeat_sublist = npz['highfeat_list']
    featstat_sublist = npz['featstat_list']
    # label_list = npz['label_list']
    # Random subsample to keep the t-SNE computation tractable.
    num_samples = len(highfeat_sublist)
    index_select = np.random.choice(np.arange(num_samples),
                                    int(sample_rate * num_samples),
                                    replace=False)
    highfeat_list.extend(highfeat_sublist[index_select])
    featstat_list.extend(featstat_sublist[index_select])
    # The dataset index serves as the colour label for plotting.
    idx_list.extend(idx * np.ones(len(index_select)))
# BUG FIX: the converted index array was previously bound to the loop-local
# name `index_list` instead of `idx_list`, so the raw Python list was saved.
highfeat_list = np.array(highfeat_list)
featstat_list = np.array(featstat_list)
idx_list = np.array(idx_list)
tsne_pos_highfeat = tsne_compute(highfeat_list)
tsne_pos_lowfeat = tsne_compute(featstat_list)
np.save(os.path.join(dirname, 'tsne_pos_highfeat'), tsne_pos_highfeat)
np.save(os.path.join(dirname, 'tsne_pos_lowfeat'), tsne_pos_lowfeat)
np.save(os.path.join(dirname, 'idx'), idx_list)
| 2,194 | 34.403226 | 78 | py |
null | OpenOOD-main/tools/sweep/hyperparam.py | 0 | 0 | 0 | py | |
ILA | ILA-master/README.md |
# [ICCV'2023] Implicit Temporal Modeling with Learnable Alignment for Video Recognition
This is an official implementation of [ILA](https://arxiv.org/abs/2304.10465), a new temporal modeling method for video action recognition.
> [**Implicit Temporal Modeling with Learnable Alignment for Video Recognition**](https://arxiv.org/abs/2304.10465)<br>
> accepted by ICCV 2023<br>
> Shuyuan Tu, [Qi Dai](https://scholar.google.com/citations?user=NSJY12IAAAAJ), [Zuxuan Wu](https://zxwu.azurewebsites.net/), [Zhi-Qi Cheng](https://scholar.google.com/citations?user=uB2He2UAAAAJ), [Han Hu](https://ancientmooner.github.io/), [Yu-Gang Jiang](https://fvl.fudan.edu.cn/)
[[arxiv]](https://arxiv.org/abs/2304.10465)
<div align="center">
<img width="80%" alt="ILA performance" src="figures/performance.png"/>
</div>
# News
- :star2: **[July, 2023]** ILA has been accepted by ICCV2023.
# Environment Setup
To set up the environment, you can easily run the following command:
```
pip install torch==1.11.0
pip install torchvision==0.12.0
pip install pathlib
pip install mmcv-full
pip install decord
pip install ftfy
pip install einops
pip install termcolor
pip install timm
pip install regex
```
Install Apex as follows
```
git clone https://github.com/NVIDIA/apex
cd apex
pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./
```
# Data Preparation
For downloading the Kinetics datasets, you can refer to [mmaction2](https://github.com/open-mmlab/mmaction2/blob/master/tools/data/kinetics/README.md) or [CVDF](https://github.com/cvdfoundation/kinetics-dataset). For [Something-Something v2](https://developer.qualcomm.com/software/ai-datasets/something-something), you can get them from the official website.
Due to limited storage, we decode the videos on the fly using [decord](https://github.com/dmlc/decord).
We provide the following way to organize the dataset:
- **Standard Folder:** For standard folder, put all videos in the `videos` folder, and prepare the annotation files as `train.txt` and `val.txt`. Please make sure the folder looks like this:
```Shell
$ ls /PATH/TO/videos | head -n 2
a.mp4
b.mp4
$ head -n 2 /PATH/TO/train.txt
a.mp4 0
b.mp4 2
$ head -n 2 /PATH/TO/val.txt
c.mp4 1
d.mp4 2
```
# Train
The training configurations lie in `configs`. For example, you can run the following command to train ILA-ViT-B/16 with 8 frames on Something-Something v2.
```
python -m torch.distributed.launch --nproc_per_node=8 main.py -cfg configs/ssv2/16_8.yaml --output /PATH/TO/OUTPUT --accumulation-steps 8
```
**Note:**
- We recommend setting the total batch size to 256.
- Please specify the data path in config file(`configs/*.yaml`). For standard folder, set that to `/PATH/TO/videos` naturally.
- The pretrained CLIP is specified by using `--pretrained /PATH/TO/PRETRAINED`.
# Test
For example, you can run the following command to validate the ILA-ViT-B/16 with 8 frames on Something-Something v2.
```
python -m torch.distributed.launch --nproc_per_node=8 main.py -cfg configs/ssv2/16_8.yaml --output /PATH/TO/OUTPUT --only_test --resume /PATH/TO/CKPT --opts TEST.NUM_CLIP 4 TEST.NUM_CROP 3
```
**Note:**
- According to our experience and sanity checks, there is a reasonable amount of random variation in accuracy when testing on different machines.
- There are two parts in the provided logs. The first part is conventional training followed by validation per epoch with single-view. The second part refers to the multiview (3 crops x 4 clips) inference logs.
# Main Results in paper
This is the original implementation, released for open-source use. In the following tables we report the accuracy from the original paper.
- Fully-supervised on Kinetics-400:
| Model | Input | Top-1 Acc.(%) | Top-5 Acc.(%)| ckpt |
|:--:|--:|:--:|:--:|:--:|
| ILA-B/32 | 8x224 | 81.3 | 95.0 | [GoogleDrive](https://drive.google.com/file/d/1hbl3nndAcxENsif0QuEJ_8UxiTv9jSOh/view?usp=share_link) |
| ILA-B/32 | 16x224 | 82.4 | 95.8 | [GoogleDrive](https://drive.google.com/file/d/1GEO8m1qfDsOj-81YW5jc8ekAxPWMrtg3/view?usp=share_link) |
| ILA-B/16 | 8x224 | 84.0 | 96.6 | [GoogleDrive](https://drive.google.com/file/d/1lAUgzxBDHoueXDaG9X9WFlBPVBl5LNdk/view?usp=share_link) |
| ILA-B/16 | 16x224 | 85.7 | 97.2 | [GoogleDrive](https://drive.google.com/file/d/1IYz8DzzgoNbU1aXyeHFl69t6pb0RNMsV/view?usp=share_link) |
| ILA-L/14 | 8x224 | 88.0 | 98.1 | [GoogleDrive](https://drive.google.com/file/d/1IhalyRKqAbJ9efLAgAA4f7t9hwRMux0p/view?usp=share_link) |
| ILA-L/14 | 16x336 | 88.7 | 97.8 |[GoogleDrive](https://drive.google.com/file/d/132aB_FF-jPKP1z3kZArG_tOj32rVIFYc/view?usp=share_link) |
- Fully-supervised on Something-Something v2:
| Model | Input | Top-1 Acc.(%) | Top-5 Acc.(%)| ckpt |
|:--:|--:|:--:|:--:|:--:|
| ILA-B/16 | 8x224 | 65.0 | 89.2 | [GoogleDrive](https://drive.google.com/file/d/1Ei0fO-W8u4jBQO0qG6V7nlRjKUuozbLa/view?usp=share_link) |
| ILA-B/16 | 16x224 | 66.8 | 90.3 | [GoogleDrive](https://drive.google.com/file/d/1NYXsYSOjRuuOIXiCTaTE2gFUCi-1qC_5/view?usp=share_link) |
| ILA-L/14 | 8x224 | 67.8 | 90.5 | [GoogleDrive](https://drive.google.com/file/d/10xHxzyUH38bjkAAutlBvMkj0BxXaaobB/view?usp=share_link) |
| ILA-L/14 | 16x336 | 70.2 | 91.8 | [GoogleDrive](https://drive.google.com/file/d/1ZquukTAxosC2k1uquI9jnMsk4BA2N5w5/view?usp=share_link) |
# Bibtex
If this project is useful for you, please consider citing our paper:
```
@article{ILA,
title={Implicit Temporal Modeling with Learnable Alignment for Video Recognition},
author={Tu, Shuyuan and Dai, Qi and Wu, Zuxuan and Cheng, Zhi-Qi and Hu, Han and Jiang, Yu-Gang},
journal={arXiv preprint arXiv:2304.10465},
year={2023}
}
```
# Acknowledgements
Parts of the codes are borrowed from [mmaction2](https://github.com/open-mmlab/mmaction2), [Swin](https://github.com/microsoft/Swin-Transformer) and [X-CLIP](https://github.com/microsoft/VideoX/tree/master/X-CLIP). Sincere thanks to their wonderful works.
| 6,068 | 48.341463 | 359 | md |
ILA | ILA-master/main.py | import os
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import argparse
import datetime
import shutil
from pathlib import Path
from PIL import Image
from einops import rearrange
from utils.config import get_config
from utils.optimizer import build_optimizer, build_scheduler
from utils.helper import AverageMeter, epoch_saving, load_checkpoint, generate_text, auto_resume_helper
from datasets.build import build_dataloader, heatmap_data_process
from utils.logger import create_logger
import time
import numpy as np
import random
from apex import amp
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets.blending import CutmixMixupBlending
from utils.config import get_config
from models import xclip
def parse_option():
    """Parse the command line and derive the experiment config.

    Returns the raw argparse namespace together with the config produced by
    `get_config(args)`.
    """
    arg_parser = argparse.ArgumentParser()
    # Experiment YAML plus free-form KEY VALUE overrides.
    arg_parser.add_argument('--config', '-cfg', required=True, type=str, default='configs/k400/32_8.yaml')
    arg_parser.add_argument("--opts", help="Modify config options by adding 'KEY VALUE' pairs. ", default=None, nargs='+')
    # Output/checkpoint handling.
    arg_parser.add_argument('--output', type=str, default="exp")
    arg_parser.add_argument('--resume', type=str)
    arg_parser.add_argument('--pretrained', type=str)
    arg_parser.add_argument('--only_test', action='store_true')
    # Optional numeric overrides of the YAML values.
    arg_parser.add_argument('--batch-size', type=int)
    arg_parser.add_argument('--accumulation-steps', type=int)
    # Set by torch.distributed.launch.
    arg_parser.add_argument("--local_rank", type=int, default=-1, help='local rank for DistributedDataParallel')
    parsed_args = arg_parser.parse_args()
    merged_config = get_config(parsed_args)
    return parsed_args, merged_config
def main(config):
    """Build dataloaders/model/optimizer, optionally resume, run the training
    loop with per-epoch validation, then run a final evaluation pass."""
    # Pin the multi-view test settings (4 clips x 3 crops) before the
    # loaders are built.
    config.defrost()
    config.TEST.NUM_CLIP = 4
    config.TEST.NUM_CROP = 3
    config.freeze()
    train_data, val_data, train_loader, val_loader = build_dataloader(logger, config)
    # Instantiate the CLIP-based video model on CPU; moved to GPU below.
    model, _ = xclip.load(config.MODEL.PRETRAINED, config.MODEL.ARCH,
                          device="cpu", jit=False,
                          T=config.DATA.NUM_FRAMES,
                          droppath=config.MODEL.DROP_PATH_RATE,
                          use_checkpoint=config.TRAIN.USE_CHECKPOINT,
                          use_cache=config.MODEL.FIX_TEXT,
                          logger=logger,
                          )
    model = model.cuda()
    # Pick the criterion to match the augmentation: soft targets for
    # mixup/cutmix, label smoothing, or plain cross entropy.
    mixup_fn = None
    if config.AUG.MIXUP > 0:
        criterion = SoftTargetCrossEntropy()
        mixup_fn = CutmixMixupBlending(num_classes=config.DATA.NUM_CLASSES,
                                       smoothing=config.AUG.LABEL_SMOOTH,
                                       mixup_alpha=config.AUG.MIXUP,
                                       cutmix_alpha=config.AUG.CUTMIX,
                                       switch_prob=config.AUG.MIXUP_SWITCH_PROB)
    elif config.AUG.LABEL_SMOOTH > 0:
        criterion = LabelSmoothingCrossEntropy(smoothing=config.AUG.LABEL_SMOOTH)
    else:
        criterion = nn.CrossEntropyLoss()
    optimizer = build_optimizer(config, model)
    lr_scheduler = build_scheduler(config, optimizer, len(train_loader))
    if config.TRAIN.OPT_LEVEL != 'O0':
        # Apex AMP must wrap model and optimizer before DDP.
        model, optimizer = amp.initialize(models=model, optimizers=optimizer, opt_level=config.TRAIN.OPT_LEVEL)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.LOCAL_RANK], broadcast_buffers=False, find_unused_parameters=True)
    start_epoch, max_accuracy = 0, 0.0
    if config.TRAIN.AUTO_RESUME:
        # Resume from the latest checkpoint in the output dir, if any.
        resume_file = auto_resume_helper(config.OUTPUT)
        if resume_file:
            config.defrost()
            config.MODEL.RESUME = resume_file
            config.freeze()
            logger.info(f'auto resuming from {resume_file}')
        else:
            logger.info(f'no checkpoint found in {config.OUTPUT}, ignoring auto resume')
    if config.MODEL.RESUME:
        start_epoch, max_accuracy = load_checkpoint(config, model.module, optimizer, lr_scheduler, logger)
    # Text inputs derived from the dataset via generate_text (presumably
    # class-name prompts; see utils.helper) -- used by the video-text model.
    text_labels = generate_text(train_data)
    if config.TEST.ONLY_TEST:
        # Evaluation-only mode: validate once and return.
        acc1 = validate(val_loader, text_labels, model, config)
        logger.info(f"Accuracy of the network on the {len(val_data)} test videos: {acc1:.1f}%")
        return
    for epoch in range(start_epoch, config.TRAIN.EPOCHS):
        # Re-seed the distributed sampler so shuffling differs per epoch.
        train_loader.sampler.set_epoch(epoch)
        train_one_epoch(epoch, model, criterion, optimizer, lr_scheduler, train_loader, text_labels, config, mixup_fn)
        acc1 = validate(val_loader, text_labels, model, config)
        logger.info(f"Accuracy of the network on the {len(val_data)} test videos: {acc1:.1f}%")
        is_best = acc1 > max_accuracy
        max_accuracy = max(max_accuracy, acc1)
        logger.info(f'Max accuracy: {max_accuracy:.2f}%')
        # Rank 0 checkpoints periodically and always on the final epoch.
        if dist.get_rank() == 0 and (epoch % config.SAVE_FREQ == 0 or epoch == (config.TRAIN.EPOCHS - 1)):
            epoch_saving(config, epoch, model.module, max_accuracy, optimizer, lr_scheduler, logger, config.OUTPUT, is_best)
    # Final evaluation: rebuild the loaders with the same 4-clip x 3-crop
    # settings and validate once more.
    config.defrost()
    config.TEST.NUM_CLIP = 4
    config.TEST.NUM_CROP = 3
    config.freeze()
    train_data, val_data, train_loader, val_loader = build_dataloader(logger, config)
    acc1 = validate(val_loader, text_labels, model, config)
    logger.info(f"Accuracy of the network on the {len(val_data)} test videos: {acc1:.1f}%")
def train_one_epoch(epoch, model, criterion, optimizer, lr_scheduler, train_loader, text_labels, config, mixup_fn):
    """Run one training epoch.

    The loss is criterion(output, label) plus the sum of the per-block
    cosine losses returned by the model; gradient accumulation and Apex
    AMP are applied according to the config.
    """
    # Flag every transformer block's message attention as training mode.
    num_layers = len(model.module.visual.transformer.resblocks)
    for i in range(num_layers):
        model.module.visual.transformer.resblocks[i].message_attn.is_training = True
    # From epoch 30 on (hard-coded threshold), decay the alignment module
    # once per epoch.
    if epoch >= 30:
        for i in range(num_layers):
            model.module.visual.transformer.resblocks[i].message_attn.align_decay()
    model.train()
    optimizer.zero_grad()
    num_steps = len(train_loader)
    batch_time = AverageMeter()
    tot_loss_meter = AverageMeter()
    start = time.time()
    end = time.time()
    texts = text_labels.cuda(non_blocking=True)
    for idx, batch_data in enumerate(train_loader):
        images = batch_data["imgs"].cuda(non_blocking=True)
        label_id = batch_data["label"].cuda(non_blocking=True)
        label_id = label_id.reshape(-1)
        # Regroup the flat frame stack into (-1, NUM_FRAMES, 3, H, W).
        images = images.view((-1, config.DATA.NUM_FRAMES, 3)+images.size()[-2:])
        if mixup_fn is not None:
            images, label_id = mixup_fn(images, label_id)
        if texts.shape[0] == 1:
            texts = texts.view(1, -1)
        output, cos_loss_list = model(images, texts)
        total_loss = criterion(output, label_id)
        # Add the auxiliary cosine losses collected from the model.
        cosine_loss = sum(cos_loss_list)
        total_loss = total_loss + cosine_loss
        # Scale by the accumulation factor so accumulated grads average out.
        total_loss = total_loss / config.TRAIN.ACCUMULATION_STEPS
        if config.TRAIN.ACCUMULATION_STEPS == 1:
            optimizer.zero_grad()
        if config.TRAIN.OPT_LEVEL != 'O0':
            # Apex AMP loss scaling for mixed-precision training.
            with amp.scale_loss(total_loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            total_loss.backward()
        if config.TRAIN.ACCUMULATION_STEPS > 1:
            # Step, zero and advance the LR schedule only on accumulation
            # boundaries.
            if (idx + 1) % config.TRAIN.ACCUMULATION_STEPS == 0:
                optimizer.step()
                optimizer.zero_grad()
                lr_scheduler.step_update(epoch * num_steps + idx)
        else:
            optimizer.step()
            lr_scheduler.step_update(epoch * num_steps + idx)
        torch.cuda.synchronize()
        # NOTE: the logged value is the accumulation-scaled loss.
        tot_loss_meter.update(total_loss.item(), len(label_id))
        batch_time.update(time.time() - end)
        end = time.time()
        if idx % config.PRINT_FREQ == 0:
            lr = optimizer.param_groups[0]['lr']
            memory_used = torch.cuda.max_memory_allocated() / (1024.0 * 1024.0)
            etas = batch_time.avg * (num_steps - idx)
            logger.info(
                f'Train: [{epoch}/{config.TRAIN.EPOCHS}][{idx}/{num_steps}]\t'
                f'eta {datetime.timedelta(seconds=int(etas))} lr {lr:.9f}\t'
                f'time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                f'tot_loss {tot_loss_meter.val:.4f} ({tot_loss_meter.avg:.4f})\t'
                f'mem {memory_used:.0f}MB')
    epoch_time = time.time() - start
    logger.info(f"EPOCH {epoch} training takes {datetime.timedelta(seconds=int(epoch_time))}")
@torch.no_grad()
def validate(val_loader, text_labels, model, config):
    """Evaluate on `val_loader` and return the top-1 accuracy in percent.

    Each sample bundles the frames of all its views; per-view softmax
    scores are summed before taking the top-k prediction.
    """
    model.eval()
    # Switch every block's message attention into inference mode.
    num_layers = len(model.module.visual.transformer.resblocks)
    for i in range(num_layers):
        model.module.visual.transformer.resblocks[i].message_attn.is_training = False
    acc1_meter, acc5_meter = AverageMeter(), AverageMeter()
    # NOTE(review): this inner no_grad is redundant given the decorator.
    with torch.no_grad():
        text_inputs = text_labels.cuda()
        logger.info(f"{config.TEST.NUM_CLIP * config.TEST.NUM_CROP} views inference")
        for idx, batch_data in enumerate(val_loader):
            _image = batch_data["imgs"]
            label_id = batch_data["label"]
            label_id = label_id.reshape(-1)
            # tn stacks all views' frames; n = tn // NUM_FRAMES recovers
            # the number of views per video.
            b, tn, c, h, w = _image.size()
            t = config.DATA.NUM_FRAMES
            n = tn // t
            _image = _image.view(b, n, t, c, h, w)
            # Accumulate softmax scores over all views of each video.
            tot_similarity = torch.zeros((b, config.DATA.NUM_CLASSES)).cuda()
            for i in range(n):
                image = _image[:, i, :, :, :, :]
                label_id = label_id.cuda(non_blocking=True)
                image_input = image.cuda(non_blocking=True)
                if config.TRAIN.OPT_LEVEL == 'O2':
                    # O2 runs the model in fp16, so inputs must match.
                    image_input = image_input.half()
                output, cos_loss_list = model(image_input, text_inputs)
                similarity = output.view(b, -1).softmax(dim=-1)
                tot_similarity += similarity
            values_1, indices_1 = tot_similarity.topk(1, dim=-1)
            values_5, indices_5 = tot_similarity.topk(5, dim=-1)
            acc1, acc5 = 0, 0
            # Per-sample top-1/top-5 hit counting.
            for i in range(b):
                if indices_1[i] == label_id[i]:
                    acc1 += 1
                if label_id[i] in indices_5[i]:
                    acc5 += 1
            acc1_meter.update(float(acc1) / b * 100, b)
            acc5_meter.update(float(acc5) / b * 100, b)
            if idx % config.PRINT_FREQ == 0:
                logger.info(
                    f'Test: [{idx}/{len(val_loader)}]\t'
                    f'Acc@1: {acc1_meter.avg:.3f}\t'
                )
    logger.info(f' * Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f}')
    return acc1_meter.avg
if __name__ == '__main__':
    # prepare config
    args, config = parse_option()
    # init_distributed: RANK/WORLD_SIZE are injected by the distributed
    # launcher; fall back to -1 when they are absent.
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        rank = int(os.environ["RANK"])
        world_size = int(os.environ['WORLD_SIZE'])
        print(f"RANK and WORLD_SIZE in environ: {rank}/{world_size}")
    else:
        rank = -1
        world_size = -1
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://', world_size=world_size, rank=rank)
    torch.distributed.barrier(device_ids=[args.local_rank])
    # Seed all RNGs; offset by rank so workers draw different streams.
    seed = config.SEED + dist.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True
    # create working_dir
    Path(config.OUTPUT).mkdir(parents=True, exist_ok=True)
    # logger
    logger = create_logger(output_dir=config.OUTPUT, dist_rank=dist.get_rank(), name=f"{config.MODEL.ARCH}")
    logger.info(f"working dir: {config.OUTPUT}")
    # save config: only rank 0 logs it and copies the YAML to the output dir.
    if dist.get_rank() == 0:
        logger.info(config)
        shutil.copy(args.config, config.OUTPUT)
main(config) | 11,613 | 38.104377 | 146 | py |
ILA | ILA-master/clip/__init__.py | from .clip import *
| 23 | 3.8 | 19 | py |
ILA | ILA-master/clip/clip.py | import hashlib
import os
import sys
import urllib.request
import warnings
from typing import Union, List
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
# from .model import build_model
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
sys.path.append("../")
from models.xclip import build_model
# Public API of this module; the underscored names are exported deliberately
# so downstream code can reuse the download helper and the model registry.
__all__ = ["available_models", "load", "tokenize", "_download", "_MODELS"]
# Shared tokenizer instance used by this module.
_tokenizer = _Tokenizer()
# Architecture name -> official OpenAI CLIP checkpoint URL. The path
# component just before the filename is the file's expected SHA-256 digest
# (checked by `_download` via url.split("/")[-2]).
_MODELS = {
    "ViT-B/32": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
    "ViT-B/16": "https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
    "ViT-L/14": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
    "ViT-L/14@336px": "https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt",
}
def _download(url: str, root: str = os.path.expanduser("~/.cache/clip")):
    """Download ``url`` into ``root`` and return the local file path.

    The expected SHA256 digest is taken from the second-to-last URL path
    segment (OpenAI's release URLs embed it there).  An already-downloaded
    file with a matching digest is reused; on a mismatch the file is
    re-downloaded.

    Raises:
        RuntimeError: if the target path exists but is not a regular file,
            or if the downloaded file fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    def _sha256(path):
        # Stream in chunks so multi-GB checkpoints need not fit in memory
        # (the original read the whole file into memory just to hash it).
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest()

    if os.path.isfile(download_target):
        if _sha256(download_target) == expected_sha256:
            return download_target
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    if _sha256(download_target) != expected_sha256:
        # Message fixed: the original read "does not not match".
        raise RuntimeError("Model has been downloaded but the SHA256 checksum does not match")

    return download_target
def _transform(n_px):
    """CLIP image preprocessing: bicubic resize, center crop to ``n_px``,
    RGB conversion, tensor conversion, and CLIP's channel normalization."""
    def _to_rgb(image):
        return image.convert("RGB")

    steps = [
        Resize(n_px, interpolation=Image.BICUBIC),
        CenterCrop(n_px),
        _to_rgb,
        ToTensor(),
        Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
    ]
    return Compose(steps)
def available_models() -> List[str]:
    """Return the names of the CLIP checkpoints this module can download."""
    return [model_name for model_name in _MODELS]
def load(model_path, name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit=True, T=8, droppath=0., use_checkpoint=False, logger=None, use_cache=True, prompts_alpha=1e-1, prompts_layers=2, mit_layers=1,
):
    """Load a CLIP checkpoint and adapt it into an X-CLIP video model.

    Parameters
    ----------
    model_path : str or None
        Local checkpoint path; when None the checkpoint for ``name`` is
        downloaded into the cache directory first.
    name : str
        A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
    device : Union[str, torch.device]
        The device to put the loaded model
    jit : bool
        Whether to load the optimized JIT model (default) or more hackable non-JIT model.
    T : int
        Number of video frames the adapted model expects.
    droppath, use_checkpoint, use_cache, prompts_alpha, prompts_layers, mit_layers
        X-CLIP specific options forwarded to :func:`build_model`.

    Returns
    -------
    model : torch.nn.Module
        The CLIP model
    preprocess : Callable[[PIL.Image], torch.Tensor]
        A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input.
        NOTE: in the non-JIT path the model's ``state_dict()`` is returned
        as the second element instead of a preprocessing transform.
    """
    if model_path is None:
        model_path = _download(_MODELS[name])
    try:
        # loading JIT archive
        model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        state_dict = None
    except RuntimeError:
        # loading saved state dict
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        state_dict = torch.load(model_path, map_location="cpu")

    if not jit:
        # Rebuild the architecture from the state dict and splice in the
        # video-specific modules (prompts, message tokens, MIT).
        model = build_model(state_dict or model.state_dict(), T=T, droppath=droppath,
                            use_checkpoint=use_checkpoint, logger=logger,
                            prompts_alpha=prompts_alpha,
                            prompts_layers=prompts_layers,
                            use_cache=use_cache,
                            mit_layers=mit_layers,
                            )
        if str(device) == "cpu":
            model.float()
        return model, model.state_dict()

    # patch the device names
    # JIT graphs bake device constants in; rewrite every "cuda" constant to
    # the requested target device.
    device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
    device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]

    def patch_device(module):
        graphs = [module.graph] if hasattr(module, "graph") else []
        if hasattr(module, "forward1"):
            graphs.append(module.forward1.graph)

        for graph in graphs:
            for node in graph.findAllNodes("prim::Constant"):
                if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
                    node.copyAttributes(device_node)

    model.apply(patch_device)

    if str(device) == "cpu":
        # On CPU the fp16 weights must run in fp32: rewrite every dtype
        # constant 5 (torch.half) in aten::to calls to float.
        float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
        float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
        float_node = float_input.node()

        def patch_float(module):
            graphs = [module.graph] if hasattr(module, "graph") else []
            if hasattr(module, "forward1"):
                graphs.append(module.forward1.graph)

            for graph in graphs:
                for node in graph.findAllNodes("aten::to"):
                    inputs = list(node.inputs())
                    for i in [1, 2]: # dtype can be the second or third argument to aten::to()
                        if inputs[i].node()["value"] == 5:
                            inputs[i].node().copyAttributes(float_node)

        model.apply(patch_float)
        patch_float(model.encode_image)
        patch_float(model.encode_text)

        model.float()

    return model, _transform(model.input_resolution.item())
def tokenize(texts: Union[str, List[str]], context_length: int = 77, truncate: bool = False) -> torch.LongTensor:
    """
    Returns the tokenized representation of given input string(s)

    Parameters
    ----------
    texts : Union[str, List[str]]
        An input string or a list of input strings to tokenize

    context_length : int
        The context length to use; all CLIP models use 77 as the context length

    truncate : bool
        Whether to truncate the text if its encoding is longer than the
        context length (the final token is forced to the end-of-text token,
        matching the official CLIP implementation).  Defaults to False,
        which preserves the original behavior of raising.

    Returns
    -------
    A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]

    Raises
    ------
    RuntimeError
        If ``truncate`` is False and an input encodes to more than
        ``context_length`` tokens.
    """
    if isinstance(texts, str):
        texts = [texts]

    sot_token = _tokenizer.encoder["<|startoftext|>"]
    eot_token = _tokenizer.encoder["<|endoftext|>"]
    all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
    result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)

    for i, tokens in enumerate(all_tokens):
        if len(tokens) > context_length:
            if truncate:
                # Keep the first context_length tokens and force the
                # sequence to still end with the end-of-text marker.
                tokens = tokens[:context_length]
                tokens[-1] = eot_token
            else:
                raise RuntimeError(f"Input {texts[i]} is too long for context length {context_length}")
        result[i, :len(tokens)] = torch.tensor(tokens)

    return result
| 7,595 | 37.363636 | 154 | py |
ILA | ILA-master/clip/model.py | import copy
from collections import OrderedDict
from typing import Tuple, Union
from timm.models.layers import trunc_normal_
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from einops import rearrange
from torch.utils.checkpoint import checkpoint_sequential
import math
import clip
def drop_path(x, drop_prob: float = 0., training: bool = False):
    """Stochastic depth: randomly zero whole samples of ``x`` during training.

    Each sample in the batch is kept with probability ``1 - drop_prob`` and,
    when kept, rescaled by ``1 / (1 - drop_prob)`` so the expected value is
    unchanged.  At evaluation time (``training=False``) or with
    ``drop_prob == 0`` the input is returned untouched.  See the timm
    discussion on naming: https://github.com/tensorflow/tpu/issues/494
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcast over all remaining dimensions
    # (works for both 2D-conv and other tensor layouts).
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = keep_prob + torch.rand(mask_shape, dtype=x.dtype, device=x.device)
    mask.floor_()  # 1 with probability keep_prob, else 0
    return x.div(keep_prob) * mask
class DropPath(nn.Module):
    """Module wrapper around :func:`drop_path` (stochastic depth).

    The drop probability is stored on the module; training/eval mode is
    taken from ``self.training``.
    """
    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)
class LayerNorm(nn.LayerNorm):
    """Subclass torch's LayerNorm to handle fp16."""

    def forward(self, x: torch.Tensor):
        # The usual CLIP trick of casting to float32 for the normalization
        # is disabled here (kept below for reference); the input is
        # normalized in its original dtype.
        # orig_type = x.dtype
        # ret = super().forward(x.type(torch.float32))
        # return ret.type(orig_type)
        return super().forward(x)
class QuickGELU(nn.Module):
    """GELU approximation used by CLIP: ``x * sigmoid(1.702 * x)``."""

    def forward(self, x: torch.Tensor):
        gate = torch.sigmoid(1.702 * x)
        return x * gate
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention and MLP, each residual.

    Expects LND-ordered input (sequence, batch, dim), as produced by the
    surrounding Transformer.
    """
    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, ):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head,)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        # Keep the cached mask on the input's dtype/device.
        mask = self.attn_mask
        if mask is not None:
            mask = mask.to(dtype=x.dtype, device=x.device)
        self.attn_mask = mask
        return self.attn(x, x, x, need_weights=False, attn_mask=mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        return x + self.mlp(self.ln_2(x))
class Transformer(nn.Module):
    """Stack of ``layers`` ResidualAttentionBlocks sharing one attention mask."""

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.width = width
        self.layers = layers
        blocks = [ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        return self.resblocks(x)
class VisionTransformer(nn.Module):
    """CLIP's ViT image encoder: patch embedding + transformer + projection."""

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
        super().__init__()
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding: stride == kernel == patch_size.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)

        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        # One positional embedding per patch plus one for the class token.
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)

        self.transformer = Transformer(width, layers, heads)

        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def forward(self, x: torch.Tensor):
        x = self.conv1(x)  # shape = [*, width, grid, grid]
        x = x.reshape(x.shape[0], x.shape[1], -1)  # shape = [*, width, grid ** 2]
        x = x.permute(0, 2, 1)  # shape = [*, grid ** 2, width]
        # Prepend the learned class token to every sequence in the batch.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)  # shape = [*, grid ** 2 + 1, width]
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)

        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD

        # The class-token output is the image representation.
        x = self.ln_post(x[:, 0, :])

        if self.proj is not None:
            x = x @ self.proj

        return x
class CLIP(nn.Module):
    """Skeleton of the original CLIP dual encoder.

    NOTE(review): the construction of the submodules (visual encoder, text
    transformer, embeddings, projections, logit scale) is commented out in
    ``__init__`` — presumably a subclass or :func:`build_model` is expected
    to attach them; the remaining methods assume they exist. Verify against
    the callers before instantiating this class directly.
    """
    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int
                 ):
        super().__init__()

        self.context_length = context_length

        # vision_heads = vision_width // 64
        # self.visual = VisionTransformer(
        #         input_resolution=image_resolution,
        #         patch_size=vision_patch_size,
        #         width=vision_width,
        #         layers=vision_layers,
        #         heads=vision_heads,
        #         output_dim=embed_dim
        #     )

        # self.transformer = Transformer(
        #     width=transformer_width,
        #     layers=transformer_layers,
        #     heads=transformer_heads,
        #     attn_mask=self.build_attention_mask()
        # )

        # self.vocab_size = vocab_size
        # self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        # self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        # self.ln_final = LayerNorm(transformer_width)

        # self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))

        # self.initialize_parameters()

    def initialize_parameters(self):
        # CLIP's published initialization scheme; depends on the (currently
        # commented-out) token_embedding / transformer / text_projection.
        nn.init.normal_(self.token_embedding.weight, std=0.02)
        nn.init.normal_(self.positional_embedding, std=0.01)

        proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
        attn_std = self.transformer.width ** -0.5
        fc_std = (2 * self.transformer.width) ** -0.5
        for block in self.transformer.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)

        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    @property
    def dtype(self):
        # Parameter dtype of the visual stem; used to cast inputs.
        return self.visual.conv1.weight.dtype

    def encode_image(self, image):
        return self.visual(image.type(self.dtype))

    def encode_text(self, text):
        x = self.token_embedding(text).type(self.dtype)  # [batch_size, n_ctx, d_model]

        x = x + self.positional_embedding.type(self.dtype)
        x = x.permute(1, 0, 2)  # NLD -> LND
        x = self.transformer(x)
        x = x.permute(1, 0, 2)  # LND -> NLD
        x = self.ln_final(x).type(self.dtype)

        # x.shape = [batch_size, n_ctx, transformer.width]
        # take features from the eot embedding (eot_token is the highest number in each sequence)
        x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection

        return x

    def forward(self, image, text):
        image_features = self.encode_image(image)
        text_features = self.encode_text(text)

        # normalized features
        image_features = image_features / image_features.norm(dim=1, keepdim=True)
        text_features = text_features / text_features.norm(dim=1, keepdim=True)

        # cosine similarity as logits
        logit_scale = self.logit_scale.exp()
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logits_per_image.t()

        # shape = [global_batch_size, global_batch_size]
        return logits_per_image, logits_per_text
| 9,197 | 38.646552 | 178 | py |
ILA | ILA-master/clip/model_zoo.py | import os
def get_model_path(ckpt):
    """Return ``ckpt`` if it points at an existing checkpoint file.

    Parameters
    ----------
    ckpt : str
        Path to a pretrained checkpoint.

    Raises
    ------
    FileNotFoundError
        If ``ckpt`` does not exist or is not a regular file.  The path is
        included in the exception message (the original raised a bare,
        message-less FileNotFoundError after printing).
    """
    if os.path.isfile(ckpt):
        return ckpt
    # Keep the original print for log compatibility, but also put the path
    # in the exception so failures are diagnosable from the traceback alone.
    print('not found pretrained model in {}'.format(ckpt))
    raise FileNotFoundError('not found pretrained model in {}'.format(ckpt))
| 190 | 22.875 | 62 | py |
ILA | ILA-master/clip/simple_tokenizer.py | import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
    """Absolute path of the BPE vocabulary archive shipped next to this module."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
    """Map every byte value (0..255) to a distinct printable unicode character.

    Printable latin-1 characters map to themselves; the remaining byte
    values are shifted into the 256+ codepoint range so no byte maps to
    whitespace or a control character (which the BPE code cannot handle).
    The mapping is reversible, so full utf-8 round-tripping works without
    unknown tokens.
    """
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    # Insertion order matters: SimpleTokenizer builds its vocabulary from
    # .values(), so printable bytes must come first, then the shifted ones.
    mapping = {byte: chr(byte) for byte in printable}
    shift = 0
    for byte in range(256):
        if byte not in mapping:
            mapping[byte] = chr(256 + shift)
            shift += 1
    return mapping
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a tuple of symbols, each symbol a variable-length string.
    """
    pairs = set()
    previous = word[0]
    for symbol in word[1:]:
        pairs.add((previous, symbol))
        previous = symbol
    return pairs
def basic_clean(text):
    """Repair mojibake via ftfy, undo (possibly double) HTML escaping, strip ends."""
    fixed = ftfy.fix_text(text)
    unescaped = html.unescape(html.unescape(fixed))
    return unescaped.strip()
def whitespace_clean(text):
    """Collapse every whitespace run to a single space and trim both ends."""
    return re.sub(r'\s+', ' ', text).strip()
class SimpleTokenizer(object):
    """CLIP's byte-level BPE tokenizer, with vocabulary loaded from ``bpe_path``."""

    def __init__(self, bpe_path: str = default_bpe()):
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
        # Skip the header line and keep exactly the merges the 49152-token
        # vocabulary was built from (256 bytes + 256 "</w>" variants +
        # merges + 2 special tokens).
        merges = merges[1:49152-256-2+1]
        merges = [tuple(merge.split()) for merge in merges]
        vocab = list(bytes_to_unicode().values())
        vocab = vocab + [v+'</w>' for v in vocab]
        for merge in merges:
            vocab.append(''.join(merge))
        vocab.extend(['<|startoftext|>', '<|endoftext|>'])
        self.encoder = dict(zip(vocab, range(len(vocab))))
        self.decoder = {v: k for k, v in self.encoder.items()}
        # Lower rank == merge learned earlier == applied first by bpe().
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
        # Uses the third-party `regex` module's \p{L}/\p{N} unicode classes.
        self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)

    def bpe(self, token):
        """Apply BPE merges to one token; returns a space-joined subword string."""
        if token in self.cache:
            return self.cache[token]
        # The trailing "</w>" marks the end of a word for the merge rules.
        word = tuple(token[:-1]) + ( token[-1] + '</w>',)
        pairs = get_pairs(word)

        if not pairs:
            return token+'</w>'

        while True:
            # Greedily merge the lowest-ranked (earliest-learned) pair.
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    # Copy symbols up to the next occurrence of `first`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """Encode ``text`` into a list of BPE token ids."""
        bpe_tokens = []
        text = whitespace_clean(basic_clean(text)).lower()
        for token in re.findall(self.pat, text):
            # Map raw utf-8 bytes into the reversible unicode alphabet
            # before applying BPE.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """Decode a list of BPE token ids back into a string."""
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
        return text
| 4,628 | 33.804511 | 144 | py |
ILA | ILA-master/configs/k400/14_16_336.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: kinetics400
NUM_FRAMES: 16
NUM_CLASSES: 400
LABEL_LIST: 'labels/kinetics_400_labels.csv'
INPUT_SIZE: 336
MODEL:
ARCH: ViT-L/14@336px
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 344 | 23.642857 | 48 | yaml |
ILA | ILA-master/configs/k400/14_8.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: kinetics400
NUM_FRAMES: 8
NUM_CLASSES: 400
LABEL_LIST: 'labels/kinetics_400_labels.csv'
MODEL:
ARCH: ViT-L/14
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 317 | 23.461538 | 48 | yaml |
ILA | ILA-master/configs/k400/16_16.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: kinetics400
NUM_FRAMES: 16
NUM_CLASSES: 400
LABEL_LIST: 'labels/kinetics_400_labels.csv'
MODEL:
  ARCH: ViT-B/16
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 318 | 23.538462 | 48 | yaml |
ILA | ILA-master/configs/k400/16_8.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: kinetics400
NUM_FRAMES: 8
NUM_CLASSES: 400
LABEL_LIST: 'labels/kinetics_400_labels.csv'
MODEL:
  ARCH: ViT-B/16
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 317 | 23.461538 | 48 | yaml |
ILA | ILA-master/configs/k400/32_16.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: kinetics400
NUM_FRAMES: 16
NUM_CLASSES: 400
LABEL_LIST: 'labels/kinetics_400_labels.csv'
MODEL:
ARCH: ViT-B/32
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 318 | 23.538462 | 48 | yaml |
ILA | ILA-master/configs/k400/32_8.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: kinetics400
NUM_FRAMES: 8
NUM_CLASSES: 400
LABEL_LIST: 'labels/kinetics_400_labels.csv'
MODEL:
ARCH: ViT-B/32
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 317 | 23.461538 | 48 | yaml |
ILA | ILA-master/configs/ssv2/14_16_336.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: something-somethingv2
NUM_FRAMES: 16
NUM_CLASSES: 174
LABEL_LIST: 'labels/something-something-v2-labels.csv'
INPUT_SIZE: 336
MODEL:
ARCH: ViT-L/14@336px
TRAIN:
BATCH_SIZE: 1
ACCUMULATION_STEPS: 32 | 365 | 25.142857 | 58 | yaml |
ILA | ILA-master/configs/ssv2/14_8.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: something-somethingv2
NUM_FRAMES: 8
NUM_CLASSES: 174
LABEL_LIST: 'labels/something-something-v2-labels.csv'
MODEL:
ARCH: ViT-L/14
TRAIN:
BATCH_SIZE: 4
ACCUMULATION_STEPS: 8 | 337 | 25 | 58 | yaml |
ILA | ILA-master/configs/ssv2/16_16.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: something-somethingv2
NUM_FRAMES: 16
NUM_CLASSES: 174
LABEL_LIST: 'labels/something-something-v2-labels.csv'
MODEL:
ARCH: ViT-B/16
TRAIN:
BATCH_SIZE: 4
ACCUMULATION_STEPS: 8 | 338 | 25.076923 | 58 | yaml |
ILA | ILA-master/configs/ssv2/16_32.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: something-somethingv2
NUM_FRAMES: 32
NUM_CLASSES: 174
LABEL_LIST: 'labels/something-something-v2-labels.csv'
MODEL:
ARCH: ViT-B/16
TRAIN:
BATCH_SIZE: 4
ACCUMULATION_STEPS: 8 | 338 | 25.076923 | 58 | yaml |
ILA | ILA-master/configs/ssv2/16_8.yaml | DATA:
ROOT: '/PATH/TO/videos'
TRAIN_FILE: '/PATH/TO/train_list_videos.txt'
VAL_FILE: '/PATH/TO/val_list_videos.txt'
DATASET: something-somethingv2
NUM_FRAMES: 8
NUM_CLASSES: 174
LABEL_LIST: 'labels/something-something-v2-labels.csv'
MODEL:
ARCH: ViT-B/16
TRAIN:
BATCH_SIZE: 8
ACCUMULATION_STEPS: 4 | 337 | 25 | 58 | yaml |
ILA | ILA-master/datasets/__init__.py | 0 | 0 | 0 | py | |
ILA | ILA-master/datasets/blending.py | from abc import ABCMeta, abstractmethod
import torch
import torch.nn.functional as F
from torch.distributions.beta import Beta
import numpy as np
def one_hot(x, num_classes, on_value=1., off_value=0., device='cuda'):
    """Return an (N, num_classes) tensor filled with ``off_value`` and with
    ``on_value`` placed at each sample's label index (supports smoothing)."""
    labels = x.long().view(-1, 1)
    out = torch.full((labels.size(0), num_classes), off_value, device=device)
    return out.scatter_(1, labels, on_value)
class BaseMiniBatchBlending(metaclass=ABCMeta):
    """Base class for mini-batch blending augmentations (mixup/cutmix).

    Converts hard integer labels into (optionally label-smoothed) one-hot
    targets, then delegates the actual image/label mixing to
    :meth:`do_blending`, which subclasses implement.
    """

    def __init__(self, num_classes, smoothing=0.):
        self.num_classes = num_classes
        self.off_value = smoothing / self.num_classes
        self.on_value = 1. - smoothing + self.off_value

    @abstractmethod
    def do_blending(self, imgs, label, **kwargs):
        """Mix ``imgs`` and one-hot ``label``; implemented by subclasses."""

    def __call__(self, imgs, label, **kwargs):
        """Blend one mini-batch.

        Args:
            imgs (torch.Tensor): input images, shaped (B, N, C, H, W) for 2D
                recognizers or (B, N, C, T, H, W) for 3D recognizers.
            label (torch.Tensor): hard labels of shape (B, 1) with values in
                [0, num_classes).
            kwargs (dict, optional): forwarded to :meth:`do_blending`.

        Returns:
            tuple: blended images (same shape as ``imgs``) and soft labels
            over classes with entries in [0, 1].
        """
        smoothed = one_hot(label, num_classes=self.num_classes,
                           on_value=self.on_value, off_value=self.off_value,
                           device=label.device)
        return self.do_blending(imgs, smoothed, **kwargs)
class MixupBlending(BaseMiniBatchBlending):
    """Mixup augmentation (`mixup: Beyond Empirical Risk Minimization
    <https://arxiv.org/abs/1710.09412>`_).

    Args:
        num_classes (int): number of classes.
        alpha (float): Beta-distribution parameter for the mixing weight.
        smoothing (float): label-smoothing factor passed to the base class.
    """

    def __init__(self, num_classes, alpha=.2, smoothing=0.):
        super().__init__(num_classes=num_classes, smoothing=smoothing)
        self.beta = Beta(alpha, alpha)

    def do_blending(self, imgs, label, **kwargs):
        """Convexly combine each sample with a randomly chosen partner."""
        assert len(kwargs) == 0, f'unexpected kwargs for mixup {kwargs}'

        lam = self.beta.sample()
        permutation = torch.randperm(imgs.size(0))

        blended_imgs = lam * imgs + (1 - lam) * imgs[permutation, :]
        blended_label = lam * label + (1 - lam) * label[permutation, :]
        return blended_imgs, blended_label
class CutmixBlending(BaseMiniBatchBlending):
    """CutMix augmentation (`CutMix: Regularization Strategy to Train Strong
    Classifiers with Localizable Features <https://arxiv.org/abs/1905.04899>`_).

    Args:
        num_classes (int): number of classes.
        alpha (float): Beta-distribution parameter for the area ratio.
        smoothing (float): label-smoothing factor passed to the base class.
    """

    def __init__(self, num_classes, alpha=.2, smoothing=0.):
        super().__init__(num_classes=num_classes, smoothing=smoothing)
        self.beta = Beta(alpha, alpha)

    @staticmethod
    def rand_bbox(img_size, lam):
        """Sample a random box covering roughly ``1 - lam`` of the image."""
        width = img_size[-1]
        height = img_size[-2]
        cut_ratio = torch.sqrt(1. - lam)
        cut_w = torch.tensor(int(width * cut_ratio))
        cut_h = torch.tensor(int(height * cut_ratio))

        # Uniformly sampled box centre; edges are clamped to the image.
        cx = torch.randint(width, (1, ))[0]
        cy = torch.randint(height, (1, ))[0]

        x1 = torch.clamp(cx - cut_w // 2, 0, width)
        y1 = torch.clamp(cy - cut_h // 2, 0, height)
        x2 = torch.clamp(cx + cut_w // 2, 0, width)
        y2 = torch.clamp(cy + cut_h // 2, 0, height)

        return x1, y1, x2, y2

    def do_blending(self, imgs, label, **kwargs):
        """Paste a random crop from a permuted batch; mix labels by area."""
        assert len(kwargs) == 0, f'unexpected kwargs for cutmix {kwargs}'

        permutation = torch.randperm(imgs.size(0))
        lam = self.beta.sample()

        x1, y1, x2, y2 = self.rand_bbox(imgs.size(), lam)
        imgs[:, ..., y1:y2, x1:x2] = imgs[permutation, ..., y1:y2, x1:x2]
        # Recompute lam from the clamped box so the label weight matches
        # the actual pasted area.
        lam = 1 - (1.0 * (x2 - x1) * (y2 - y1) /
                   (imgs.size()[-1] * imgs.size()[-2]))

        label = lam * label + (1 - lam) * label[permutation, :]

        return imgs, label
class LabelSmoothing(BaseMiniBatchBlending):
    """No-op blending: only the base class's label smoothing is applied."""

    def do_blending(self, imgs, label, **kwargs):
        # Images pass through untouched; labels were smoothed in __call__.
        return imgs, label
class CutmixMixupBlending(BaseMiniBatchBlending):
    """MViT-style augmentation: per batch, apply cutmix with probability
    ``switch_prob`` and mixup otherwise.

    NOTE(review): ``rand_bbox`` duplicates :meth:`CutmixBlending.rand_bbox`;
    consider sharing one implementation.
    """
    def __init__(self, num_classes=400, smoothing=0.1, mixup_alpha=.8, cutmix_alpha=1, switch_prob=0.5):
        super().__init__(num_classes=num_classes, smoothing=smoothing)
        self.mixup_beta = Beta(mixup_alpha, mixup_alpha)
        self.cutmix_beta = Beta(cutmix_alpha, cutmix_alpha)
        self.switch_prob = switch_prob

    @staticmethod
    def rand_bbox(img_size, lam):
        """Generate a random boudning box."""
        w = img_size[-1]
        h = img_size[-2]
        cut_rat = torch.sqrt(1. - lam)
        cut_w = torch.tensor(int(w * cut_rat))
        cut_h = torch.tensor(int(h * cut_rat))

        # uniform
        cx = torch.randint(w, (1, ))[0]
        cy = torch.randint(h, (1, ))[0]

        bbx1 = torch.clamp(cx - cut_w // 2, 0, w)
        bby1 = torch.clamp(cy - cut_h // 2, 0, h)
        bbx2 = torch.clamp(cx + cut_w // 2, 0, w)
        bby2 = torch.clamp(cy + cut_h // 2, 0, h)

        return bbx1, bby1, bbx2, bby2

    def do_cutmix(self, imgs, label, **kwargs):
        """Blending images with cutmix."""
        assert len(kwargs) == 0, f'unexpected kwargs for cutmix {kwargs}'

        batch_size = imgs.size(0)
        rand_index = torch.randperm(batch_size)
        lam = self.cutmix_beta.sample()

        bbx1, bby1, bbx2, bby2 = self.rand_bbox(imgs.size(), lam)
        imgs[:, ..., bby1:bby2, bbx1:bbx2] = imgs[rand_index, ..., bby1:bby2,
                                                  bbx1:bbx2]
        # Recompute lam from the clamped box so the label weight matches
        # the actual pasted area.
        lam = 1 - (1.0 * (bbx2 - bbx1) * (bby2 - bby1) /
                   (imgs.size()[-1] * imgs.size()[-2]))

        label = lam * label + (1 - lam) * label[rand_index, :]

        return imgs, label

    def do_mixup(self, imgs, label, **kwargs):
        """Blending images with mixup."""
        assert len(kwargs) == 0, f'unexpected kwargs for mixup {kwargs}'

        lam = self.mixup_beta.sample()
        batch_size = imgs.size(0)
        rand_index = torch.randperm(batch_size)

        mixed_imgs = lam * imgs + (1 - lam) * imgs[rand_index, :]
        mixed_label = lam * label + (1 - lam) * label[rand_index, :]

        return mixed_imgs, mixed_label

    def do_blending(self, imgs, label, **kwargs):
        """Blending images with MViT style. Cutmix for half for mixup for the other half."""
        assert len(kwargs) == 0, f'unexpected kwargs for cutmix_half_mixup {kwargs}'

        if np.random.rand() < self.switch_prob :
            return self.do_cutmix(imgs, label)
        else:
            return self.do_mixup(imgs, label)
| 8,103 | 36.693023 | 139 | py |
ILA | ILA-master/datasets/build.py | from logging import Logger
from torch.utils.data import DataLoader
import torch.distributed as dist
import torch
import numpy as np
from functools import partial
import random
import io
import os
import os.path as osp
import shutil
import warnings
from collections.abc import Mapping, Sequence
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import Dataset
import copy
import os.path as osp
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import os.path as osp
import mmcv
import numpy as np
import torch
import tarfile
from .pipeline import *
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from mmcv.parallel import collate
import pandas as pd
PIPELINES = Registry('pipeline')
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
img_norm_ssv2_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
class BaseDataset(Dataset, metaclass=ABCMeta):
    """Abstract base class for video datasets.

    Subclasses implement :meth:`load_annotations` to fill ``video_infos``
    (a list of dicts with at least a path key and a ``label``).  Samples are
    processed through an mmcv-style ``pipeline``.  When ``sample_by_class``
    is set, per-class sampling probabilities are precomputed from class
    frequencies raised to ``power``.
    """
    def __init__(self,
                 ann_file,
                 pipeline,
                 repeat=1,
                 data_prefix=None,
                 test_mode=False,
                 multi_class=False,
                 num_classes=None,
                 start_index=1,
                 modality='RGB',
                 sample_by_class=False,
                 power=0,
                 dynamic_length=False, ):
        super().__init__()
        # A ".tar" suffix on the prefix marks tar-archived data; the suffix
        # itself is stripped from the actual path.
        self.use_tar_format = True if ".tar" in data_prefix else False
        data_prefix = data_prefix.replace(".tar", "")
        self.ann_file = ann_file
        # repeat > 1 produces two independently augmented views per sample
        # (see prepare_train_frames).
        self.repeat = repeat
        self.data_prefix = osp.realpath(
            data_prefix) if data_prefix is not None and osp.isdir(
                data_prefix) else data_prefix
        self.test_mode = test_mode
        self.multi_class = multi_class
        self.num_classes = num_classes
        self.start_index = start_index
        self.modality = modality
        self.sample_by_class = sample_by_class
        self.power = power
        self.dynamic_length = dynamic_length

        assert not (self.multi_class and self.sample_by_class)

        self.pipeline = Compose(pipeline)
        self.video_infos = self.load_annotations()
        if self.sample_by_class:
            self.video_infos_by_class = self.parse_by_class()

            # Class sampling weights: frequency ** power, normalized to 1.
            class_prob = []
            for _, samples in self.video_infos_by_class.items():
                class_prob.append(len(samples) / len(self.video_infos))
            class_prob = [x ** self.power for x in class_prob]

            summ = sum(class_prob)
            class_prob = [x / summ for x in class_prob]

            self.class_prob = dict(zip(self.video_infos_by_class, class_prob))

    @abstractmethod
    def load_annotations(self):
        """Load the annotation according to ann_file into video_infos."""

    # json annotations already looks like video_infos, so for each dataset,
    # this func should be the same
    def load_json_annotations(self):
        """Load json annotation file to get video information."""
        video_infos = mmcv.load(self.ann_file)
        num_videos = len(video_infos)
        path_key = 'frame_dir' if 'frame_dir' in video_infos[0] else 'filename'
        for i in range(num_videos):
            path_value = video_infos[i][path_key]
            if self.data_prefix is not None:
                path_value = osp.join(self.data_prefix, path_value)
            video_infos[i][path_key] = path_value
            if self.multi_class:
                assert self.num_classes is not None
            else:
                # Single-label datasets store the label unwrapped.
                assert len(video_infos[i]['label']) == 1
                video_infos[i]['label'] = video_infos[i]['label'][0]
        return video_infos

    def parse_by_class(self):
        """Group ``video_infos`` into a dict keyed by class label."""
        video_infos_by_class = defaultdict(list)
        for item in self.video_infos:
            label = item['label']
            video_infos_by_class[label].append(item)
        return video_infos_by_class

    @staticmethod
    def label2array(num, label):
        # Convert a (possibly multi-) label into a dense float one-hot array.
        arr = np.zeros(num, dtype=np.float32)
        arr[label] = 1.
        return arr

    @staticmethod
    def dump_results(results, out):
        """Dump data to json/yaml/pickle strings or files."""
        return mmcv.dump(results, out)

    def prepare_train_frames(self, idx):
        """Prepare the frames for training given the index."""
        results = copy.deepcopy(self.video_infos[idx])
        results['modality'] = self.modality
        results['start_index'] = self.start_index

        # prepare tensor in getitem
        # If HVU, type(results['label']) is dict
        if self.multi_class and isinstance(results['label'], list):
            onehot = torch.zeros(self.num_classes)
            onehot[results['label']] = 1.
            results['label'] = onehot

        aug1 = self.pipeline(results)
        if self.repeat > 1:
            # Run the (stochastic) pipeline twice and concatenate the two
            # views along the clip dimension; the label is repeated to match.
            aug2 = self.pipeline(results)
            ret = {"imgs": torch.cat((aug1['imgs'], aug2['imgs']), 0),
                   "label": aug1['label'].repeat(2),
                   }
            return ret
        else:
            return aug1

    def prepare_test_frames(self, idx):
        """Prepare the frames for testing given the index."""
        results = copy.deepcopy(self.video_infos[idx])
        results['modality'] = self.modality
        results['start_index'] = self.start_index

        # prepare tensor in getitem
        # If HVU, type(results['label']) is dict
        if self.multi_class and isinstance(results['label'], list):
            onehot = torch.zeros(self.num_classes)
            onehot[results['label']] = 1.
            results['label'] = onehot

        return self.pipeline(results)

    def __len__(self):
        """Get the size of the dataset."""
        return len(self.video_infos)

    def __getitem__(self, idx):
        """Get the sample for either training or testing given index."""
        if self.test_mode:
            return self.prepare_test_frames(idx)

        return self.prepare_train_frames(idx)
def heatmap_data_process(video_info, flag=True, input_size=224):
    """Decode and preprocess a single video for heatmap visualisation.

    Eight frames are sampled uniformly, resized so the short side becomes
    ``int(256 / 224 * input_size)`` and center-cropped to ``input_size``.

    Args:
        video_info (dict): Raw video record consumed by the decode pipeline
            (same schema as a dataset's ``video_infos`` entries).
        flag (bool): When True (default), normalize the frames with the
            module-level ``img_norm_cfg`` before tensor formatting; when
            False, skip normalization.
        input_size (int): Output crop size. Defaults to 224, which keeps
            the previous hard-coded behaviour (short side 256, crop 224).

    Returns:
        dict: Pipeline output containing the formatted ``imgs`` tensor.
    """
    scale_resize = int(256 / 224 * input_size)
    # Stage 1: decode + geometric preprocessing.
    decode_pipeline = Compose([
        dict(type='DecordInit'),
        dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
        dict(type='DecordDecode'),
        dict(type='Resize', scale=(-1, scale_resize)),
        dict(type='CenterCrop', crop_size=input_size),
    ])
    # Stage 2: optional normalization, then tensor formatting.
    format_steps = [
        dict(type='FormatShape', input_format='NCHW'),
        dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
        dict(type='ToTensor', keys=['imgs'])
    ]
    if flag:
        format_steps.insert(0, dict(type='Normalize', **img_norm_cfg))
    format_pipeline = Compose(format_steps)
    return format_pipeline(decode_pipeline(video_info))
class VideoDataset(BaseDataset):
    """Dataset whose annotation file maps a video path to its label(s).

    Each line of a plain-text annotation file is
    ``<relpath> <label>[ <label> ...]``; a ``.json`` annotation file is
    delegated to ``load_json_annotations``.

    Args:
        ann_file (str): Path to the annotation file.
        pipeline (list): Data transforms applied to each sample.
        labels_file (str): csv file holding the class names.
        start_index (int): Frame index origin passed to the base class.
            Defaults to 0 for video files.
    """

    def __init__(self, ann_file, pipeline, labels_file, start_index=0, **kwargs):
        super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)
        self.labels_file = labels_file

    @property
    def classes(self):
        """list: class-name rows read from ``self.labels_file``."""
        classes_all = pd.read_csv(self.labels_file)
        return classes_all.values.tolist()

    def load_annotations(self):
        """Load annotation file to get video information."""
        if self.ann_file.endswith('.json'):
            return self.load_json_annotations()
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                # Skip blank lines. The previous guard (`len(line) == 0`)
                # was dead code: lines yielded by file iteration keep their
                # trailing newline, so an empty line still had length 1 and
                # crashed the tuple unpacking below with a ValueError.
                if not line.strip():
                    continue
                line_split = line.strip().split()
                if self.multi_class:
                    assert self.num_classes is not None
                    filename, label = line_split[0], line_split[1:]
                    label = list(map(int, label))
                else:
                    filename, label = line_split
                    label = int(label)
                if self.data_prefix is not None:
                    filename = osp.join(self.data_prefix, filename)
                # `use_tar_format` is presumably set by BaseDataset.__init__
                # (not visible here) — TODO confirm.
                video_infos.append(dict(filename=filename, label=label, tar=self.use_tar_format))
        return video_infos
class SubsetRandomSampler(torch.utils.data.Sampler):
    r"""Sampler that yields the given indices in a fresh random order on
    every iteration, without replacement.

    Arguments:
        indices (sequence): a sequence of indices
    """

    def __init__(self, indices):
        # ``epoch`` is tracked only for API compatibility with
        # DistributedSampler; it does not seed the shuffle here.
        self.epoch = 0
        self.indices = indices

    def __iter__(self):
        permutation = torch.randperm(len(self.indices))
        return (self.indices[slot] for slot in permutation)

    def __len__(self):
        return len(self.indices)

    def set_epoch(self, epoch):
        self.epoch = epoch
def mmcv_collate(batch, samples_per_gpu=1):
    """Recursively collate a mini-batch in the mmcv style.

    Sequences of sequences are transposed and handed to mmcv's ``collate``;
    sequences of mappings are collated per key; anything else falls through
    to torch's ``default_collate``.

    Args:
        batch (Sequence): One mini-batch of samples.
        samples_per_gpu (int): Forwarded to ``collate``. Defaults to 1.

    Returns:
        The collated batch (tensor, list or dict, mirroring input nesting).

    Raises:
        TypeError: If ``batch`` is not a sequence.
    """
    if not isinstance(batch, Sequence):
        # Bug fix: the message previously interpolated ``batch.dtype``,
        # which itself raised AttributeError for most inputs and masked
        # the intended TypeError.
        raise TypeError(f'{type(batch)} is not supported.')
    if isinstance(batch[0], Sequence):
        transposed = zip(*batch)
        return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], Mapping):
        return {
            key: mmcv_collate([d[key] for d in batch], samples_per_gpu)
            for key in batch[0]
        }
    else:
        return default_collate(batch)
def build_dataloader(logger, config):
    """Build the train/val datasets and distributed dataloaders.

    Args:
        logger: Unused here; kept for interface parity with callers.
        config: Experiment config; reads ``DATA.*``, ``AUG.*``, ``TRAIN.*``
            and ``TEST.*`` fields as shown below.

    Returns:
        tuple: (train_data, val_data, train_loader, val_loader).
    """
    # Short side target so that crop/short-side keeps the 224/256 ratio.
    scale_resize = int(256 / 224 * config.DATA.INPUT_SIZE)
    # Training: decode -> multiscale crop -> jitter/grayscale -> normalize
    # -> random erasing -> tensors.
    # NOTE(review): normalization always uses `img_norm_ssv2_cfg` (module
    # level, not visible here) even for non-SSV2 datasets — confirm intended.
    train_pipeline = [
        dict(type='DecordInit'),
        dict(type='SampleFrames', clip_len=config.DATA.NUM_FRAMES, frame_interval=2, num_clips=1, frame_uniform=True),
        dict(type='DecordDecode'),
        dict(type='Resize', scale=(-1, scale_resize)),
        dict(
            type='MultiScaleCrop',
            input_size=config.DATA.INPUT_SIZE,
            scales=(1, 0.875, 0.75, 0.66),
            random_crop=False,
            max_wh_scale_gap=1),
        dict(type='Resize', scale=(config.DATA.INPUT_SIZE, config.DATA.INPUT_SIZE), keep_ratio=False),
        dict(type='Flip', flip_ratio=0),
        dict(type='ColorJitter', p=config.AUG.COLOR_JITTER),
        dict(type='GrayScale', p=config.AUG.GRAY_SCALE),
        dict(type='Normalize', **img_norm_ssv2_cfg),
        dict(type='RandomErasing', probability=0.25),
        dict(type='FormatShape', input_format='NCHW'),
        dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
        dict(type='ToTensor', keys=['imgs', 'label']),
    ]
    train_data = VideoDataset(ann_file=config.DATA.TRAIN_FILE, data_prefix=config.DATA.ROOT, labels_file=config.DATA.LABEL_LIST, pipeline=train_pipeline)
    num_tasks = dist.get_world_size()
    global_rank = dist.get_rank()
    sampler_train = torch.utils.data.DistributedSampler(
        train_data, num_replicas=num_tasks, rank=global_rank, shuffle=True
    )
    train_loader = DataLoader(
        train_data, sampler=sampler_train,
        batch_size=config.TRAIN.BATCH_SIZE,
        num_workers=16,
        pin_memory=True,
        drop_last=True,
        collate_fn=partial(mmcv_collate, samples_per_gpu=config.TRAIN.BATCH_SIZE),
    )
    # Validation: deterministic sampling + center crop (no augmentation).
    val_pipeline = [
        dict(type='DecordInit'),
        dict(type='SampleFrames', clip_len=config.DATA.NUM_FRAMES, frame_interval=2, num_clips=1, frame_uniform=True,
             test_mode=True),
        dict(type='DecordDecode'),
        dict(type='Resize', scale=(-1, scale_resize)),
        dict(type='CenterCrop', crop_size=config.DATA.INPUT_SIZE),
        dict(type='Flip', flip_ratio=0),
        dict(type='Normalize', **img_norm_ssv2_cfg),
        dict(type='FormatShape', input_format='NCHW'),
        dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
        dict(type='ToTensor', keys=['imgs'])
    ]
    # The patches below overwrite pipeline steps BY INDEX, so they depend on
    # the exact ordering of `val_pipeline` above — keep them in sync.
    if config.TEST.NUM_CROP == 3:
        val_pipeline[3] = dict(type='Resize', scale=(-1, config.DATA.INPUT_SIZE))
        val_pipeline[4] = dict(type='ThreeCrop', crop_size=config.DATA.INPUT_SIZE)
    if config.TEST.NUM_CLIP > 1:
        val_pipeline[1] = dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=config.DATA.NUM_FRAMES, multiview=config.TEST.NUM_CLIP)
    val_data = VideoDataset(ann_file=config.DATA.VAL_FILE, data_prefix=config.DATA.ROOT, labels_file=config.DATA.LABEL_LIST, pipeline=val_pipeline)
    # Round-robin shard of validation indices across ranks.
    indices = np.arange(dist.get_rank(), len(val_data), dist.get_world_size())
    sampler_val = SubsetRandomSampler(indices)
    # NOTE(review): batch_size is hard-coded to 2 and drop_last=True on the
    # validation loader silently drops trailing samples — confirm intended.
    val_loader = DataLoader(
        val_data, sampler=sampler_val,
        batch_size=2,
        num_workers=16,
        pin_memory=True,
        drop_last=True,
        collate_fn=partial(mmcv_collate, samples_per_gpu=2),
    )
    return train_data, val_data, train_loader, val_loader
| 12,997 | 35.105556 | 153 | py |
ILA | ILA-master/datasets/pipeline.py | import io
import os
import os.path as osp
import shutil
import warnings
from collections.abc import Sequence
from mmcv.utils import Registry, build_from_cfg
from torch.utils.data import Dataset
import copy
import os.path as osp
import warnings
from abc import ABCMeta, abstractmethod
from collections import OrderedDict, defaultdict
import os.path as osp
import mmcv
import numpy as np
import torch
import tarfile
import timm.data as tdata
from torch.nn.modules.utils import _pair
import random
import torchvision
from PIL import Image
from .rand_augment import rand_augment_transform
from torchvision import transforms
from mmcv.fileio import FileClient
PIPELINES = Registry('pipeline')
def _init_lazy_if_proper(results, lazy):
    """Ensure ``results`` carries a consistent lazy-operation state.

    Fills in ``results['img_shape']`` from the first frame when missing.
    With ``lazy=True`` a fresh no-op lazy record (full-image crop, no flip,
    no interpolation) is installed if absent; with ``lazy=False`` the dict
    must not contain leftover lazy state.

    Args:
        results (dict): A dict stores data pipeline result.
        lazy (bool): Whether the calling transform operates lazily.
    """
    if 'img_shape' not in results:
        results['img_shape'] = results['imgs'][0].shape[:2]
    if not lazy:
        assert 'lazy' not in results, 'Use Fuse after lazy operations'
        return
    if 'lazy' in results:
        return
    img_h, img_w = results['img_shape']
    results['lazy'] = dict(
        original_shape=results['img_shape'],
        crop_bbox=np.array([0, 0, img_w, img_h], dtype=np.float32),
        flip=False,
        flip_direction=None,
        interpolation=None,
    )
def _pil_interp(method):
    """Translate an interpolation name into the matching PIL resampling
    filter; unknown names (including 'bilinear') fall back to
    ``Image.BILINEAR``."""
    filters = {
        "bicubic": Image.BICUBIC,
        "lanczos": Image.LANCZOS,
        "hamming": Image.HAMMING,
    }
    return filters.get(method, Image.BILINEAR)
@PIPELINES.register_module()
class EntityBoxRescale:
    """Removed component kept only to raise an informative error.

    Now registered with ``PIPELINES`` — consistent with ``EntityBoxCrop``
    and ``EntityBoxFlip`` — so configs still referencing it fail with this
    explicit message instead of an opaque registry ``KeyError``.
    """

    def __init__(self, scale_factor):
        raise NotImplementedError(
            'This component should not be used in the '
            'data pipeline and is removed in PR #782. Details see '
            'https://github.com/open-mmlab/mmaction2/pull/782')
@PIPELINES.register_module()
class EntityBoxCrop:
    """Removed component; instantiating it always raises."""

    def __init__(self, crop_bbox):
        message = ('This component should not be used in the '
                   'data pipeline and is removed in PR #782. Details see '
                   'https://github.com/open-mmlab/mmaction2/pull/782')
        raise NotImplementedError(message)
@PIPELINES.register_module()
class EntityBoxFlip:
    """Removed component; instantiating it always raises."""

    def __init__(self, img_shape):
        message = ('This component should not be used in the '
                   'data pipeline and is removed in PR #782. Details see '
                   'https://github.com/open-mmlab/mmaction2/pull/782')
        raise NotImplementedError(message)
@PIPELINES.register_module()
class Imgaug:
    """Imgaug augmentation.

    Adds custom transformations from imgaug library.
    Please visit `https://imgaug.readthedocs.io/en/latest/index.html`
    to get more information. Two demo configs could be found in tsn and i3d
    config folder.

    It's better to use uint8 images as inputs since imgaug works best with
    numpy dtype uint8 and isn't well tested with other dtypes. It should be
    noted that not all of the augmenters have the same input and output dtype,
    which may cause unexpected results.

    Required keys are "imgs", "img_shape"(if "gt_bboxes" is not None) and
    "modality", added or modified keys are "imgs", "img_shape", "gt_bboxes"
    and "proposals".

    It is worth mentioning that `Imgaug` will NOT create custom keys like
    "interpolation", "crop_bbox", "flip_direction", etc. So when using
    `Imgaug` along with other mmaction2 pipelines, we should pay more attention
    to required keys.

    Two steps to use `Imgaug` pipeline:
    1. Create initialization parameter `transforms`. There are three ways
        to create `transforms`.
        1) string: only support `default` for now.
            e.g. `transforms='default'`
        2) list[dict]: create a list of augmenters by a list of dicts, each
            dict corresponds to one augmenter. Every dict MUST contain a key
            named `type`. `type` should be a string(iaa.Augmenter's name) or
            an iaa.Augmenter subclass.
            e.g. `transforms=[dict(type='Rotate', rotate=(-20, 20))]`
            e.g. `transforms=[dict(type=iaa.Rotate, rotate=(-20, 20))]`
        3) iaa.Augmenter: create an imgaug.Augmenter object.
            e.g. `transforms=iaa.Rotate(rotate=(-20, 20))`
    2. Add `Imgaug` in dataset pipeline. It is recommended to insert imgaug
        pipeline before `Normalize`. A demo pipeline is listed as follows.
    ```
    pipeline = [
        dict(
            type='SampleFrames',
            clip_len=1,
            frame_interval=1,
            num_clips=16,
        ),
        dict(type='RawFrameDecode'),
        dict(type='Resize', scale=(-1, 256)),
        dict(
            type='MultiScaleCrop',
            input_size=224,
            scales=(1, 0.875, 0.75, 0.66),
            random_crop=False,
            max_wh_scale_gap=1,
            num_fixed_crops=13),
        dict(type='Resize', scale=(224, 224), keep_ratio=False),
        dict(type='Flip', flip_ratio=0.5),
        dict(type='Imgaug', transforms='default'),
        # dict(type='Imgaug', transforms=[
        #     dict(type='Rotate', rotate=(-20, 20))
        # ]),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='FormatShape', input_format='NCHW'),
        dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
        dict(type='ToTensor', keys=['imgs', 'label'])
    ]
    ```
    Args:
        transforms (str | list[dict] | :obj:`iaa.Augmenter`): Three different
            ways to create imgaug augmenter.
    """
    def __init__(self, transforms):
        # imgaug is imported lazily so the dependency is only needed when
        # this transform is actually instantiated.
        import imgaug.augmenters as iaa
        if transforms == 'default':
            self.transforms = self.default_transforms()
        elif isinstance(transforms, list):
            assert all(isinstance(trans, dict) for trans in transforms)
            self.transforms = transforms
        elif isinstance(transforms, iaa.Augmenter):
            # A ready-made augmenter is used directly.
            self.aug = self.transforms = transforms
        else:
            raise ValueError('transforms must be `default` or a list of dicts'
                             ' or iaa.Augmenter object')
        # For 'default' or list-of-dicts inputs, build a Sequential from the
        # dict configs.
        if not isinstance(transforms, iaa.Augmenter):
            self.aug = iaa.Sequential(
                [self.imgaug_builder(t) for t in self.transforms])
    @staticmethod
    def default_transforms():
        """Default transforms for imgaug.

        Implement RandAugment by imgaug.
        Please visit `https://arxiv.org/abs/1909.13719` for more information.
        Augmenters and hyper parameters are borrowed from the following repo:
        https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py # noqa
        Miss one augmenter ``SolarizeAdd`` since imgaug doesn't support this.

        Returns:
            dict: The constructed RandAugment transforms.
        """
        # RandAugment hyper params
        num_augmenters = 2
        cur_magnitude, max_magnitude = 9, 10
        cur_level = 1.0 * cur_magnitude / max_magnitude
        # SomeOf(n=2) applies 2 randomly chosen children per call; the
        # random sign choices below are sampled once at construction time.
        return [
            dict(
                type='SomeOf',
                n=num_augmenters,
                children=[
                    dict(
                        type='ShearX',
                        shear=17.19 * cur_level * random.choice([-1, 1])),
                    dict(
                        type='ShearY',
                        shear=17.19 * cur_level * random.choice([-1, 1])),
                    dict(
                        type='TranslateX',
                        percent=.2 * cur_level * random.choice([-1, 1])),
                    dict(
                        type='TranslateY',
                        percent=.2 * cur_level * random.choice([-1, 1])),
                    dict(
                        type='Rotate',
                        rotate=30 * cur_level * random.choice([-1, 1])),
                    dict(type='Posterize', nb_bits=max(1, int(4 * cur_level))),
                    dict(type='Solarize', threshold=256 * cur_level),
                    dict(type='EnhanceColor', factor=1.8 * cur_level + .1),
                    dict(type='EnhanceContrast', factor=1.8 * cur_level + .1),
                    dict(
                        type='EnhanceBrightness', factor=1.8 * cur_level + .1),
                    dict(type='EnhanceSharpness', factor=1.8 * cur_level + .1),
                    dict(type='Autocontrast', cutoff=0),
                    dict(type='Equalize'),
                    dict(type='Invert', p=1.),
                    dict(
                        type='Cutout',
                        nb_iterations=1,
                        size=0.2 * cur_level,
                        squared=True)
                ])
        ]
    def imgaug_builder(self, cfg):
        """Import a module from imgaug.

        It follows the logic of :func:`build_from_cfg`. Use a dict object to
        create an iaa.Augmenter object.

        Args:
            cfg (dict): Config dict. It should at least contain the key "type".

        Returns:
            obj:`iaa.Augmenter`: The constructed imgaug augmenter.
        """
        import imgaug.augmenters as iaa
        assert isinstance(cfg, dict) and 'type' in cfg
        args = cfg.copy()
        obj_type = args.pop('type')
        if mmcv.is_str(obj_type):
            # Look up the augmenter class by name; fall back to the pillike
            # namespace for PIL-style ops (Enhance*, Autocontrast, ...).
            obj_cls = getattr(iaa, obj_type) if hasattr(iaa, obj_type) \
                else getattr(iaa.pillike, obj_type)
        elif issubclass(obj_type, iaa.Augmenter):
            obj_cls = obj_type
        else:
            raise TypeError(
                f'type must be a str or valid type, but got {type(obj_type)}')
        if 'children' in args:
            # Recursively build nested augmenters (e.g. SomeOf containers).
            args['children'] = [
                self.imgaug_builder(child) for child in args['children']
            ]
        return obj_cls(**args)
    def __repr__(self):
        repr_str = self.__class__.__name__ + f'(transforms={self.aug})'
        return repr_str
    def __call__(self, results):
        assert results['modality'] == 'RGB', 'Imgaug only support RGB images.'
        in_type = results['imgs'][0].dtype.type
        # ``to_deterministic`` freezes the sampled parameters so every frame
        # of the clip (and the boxes below) receives the same augmentation.
        cur_aug = self.aug.to_deterministic()
        results['imgs'] = [
            cur_aug.augment_image(frame) for frame in results['imgs']
        ]
        img_h, img_w, _ = results['imgs'][0].shape
        out_type = results['imgs'][0].dtype.type
        # NOTE(review): the assert message below is a (str, str) tuple, not a
        # single formatted string — cosmetic only, behavior unaffected.
        assert in_type == out_type, \
            ('Imgaug input dtype and output dtype are not the same. ',
             f'Convert from {in_type} to {out_type}')
        if 'gt_bboxes' in results:
            from imgaug.augmentables import bbs
            bbox_list = [
                bbs.BoundingBox(
                    x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
                for bbox in results['gt_bboxes']
            ]
            bboxes = bbs.BoundingBoxesOnImage(
                bbox_list, shape=results['img_shape'])
            bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
            # Clip the augmented boxes back into the (possibly resized) image.
            results['gt_bboxes'] = [[
                max(bbox.x1, 0),
                max(bbox.y1, 0),
                min(bbox.x2, img_w),
                min(bbox.y2, img_h)
            ] for bbox in bbox_aug.items]
            if 'proposals' in results:
                bbox_list = [
                    bbs.BoundingBox(
                        x1=bbox[0], y1=bbox[1], x2=bbox[2], y2=bbox[3])
                    for bbox in results['proposals']
                ]
                bboxes = bbs.BoundingBoxesOnImage(
                    bbox_list, shape=results['img_shape'])
                bbox_aug, *_ = cur_aug.augment_bounding_boxes([bboxes])
                results['proposals'] = [[
                    max(bbox.x1, 0),
                    max(bbox.y1, 0),
                    min(bbox.x2, img_w),
                    min(bbox.y2, img_h)
                ] for bbox in bbox_aug.items]
        results['img_shape'] = (img_h, img_w)
        return results
@PIPELINES.register_module()
class RandomErasing(tdata.random_erasing.RandomErasing):
    """Clip-level random erasing built on timm's RandomErasing.

    The python/torch/numpy RNG states are captured once and restored before
    every frame, so each frame of the clip presumably receives the identical
    erase decision and region — TODO confirm timm draws only from these RNGs.
    """
    def __init__(self, device='cpu', **args):
        # Forced to CPU; see the determinism note in __call__.
        super().__init__(device=device, **args)
    def __call__(self, results):
        """Apply the same random erasing to every frame in results['imgs']."""
        in_type = results['imgs'][0].dtype.type
        # Snapshot all three RNG states once, before any frame is processed.
        rand_state = random.getstate()
        torchrand_state = torch.get_rng_state()
        numpyrand_state = np.random.get_state()
        # not using cuda to preserve the determiness
        out_frame = []
        for frame in results['imgs']:
            # Restore the snapshot so each frame sees identical randomness.
            random.setstate(rand_state)
            torch.set_rng_state(torchrand_state)
            np.random.set_state(numpyrand_state)
            # timm expects CHW tensors; convert HWC ndarray -> CHW tensor
            # and back after erasing.
            frame = super().__call__(torch.from_numpy(frame).permute(2, 0, 1)).permute(1, 2, 0).numpy()
            out_frame.append(frame)
        results['imgs'] = out_frame
        img_h, img_w, _ = results['imgs'][0].shape
        out_type = results['imgs'][0].dtype.type
        # NOTE(review): the assert message is a tuple — cosmetic only.
        assert in_type == out_type, \
            ('Timmaug input dtype and output dtype are not the same. ',
             f'Convert from {in_type} to {out_type}')
        if 'gt_bboxes' in results:
            raise NotImplementedError('only support recognition now')
        assert results['img_shape'] == (img_h, img_w)
        return results
@PIPELINES.register_module()
class Fuse:
    """Apply the accumulated lazy operations in one pass.

    The recorded operations run in the order crop -> resize -> flip, after
    which the ``lazy`` record is removed from ``results``.

    Required keys are "imgs", "img_shape" and "lazy"; modified keys are
    "imgs" and "lazy". Required keys in "lazy" are "crop_bbox",
    "interpolation" and "flip_direction".
    """

    def __call__(self, results):
        if 'lazy' not in results:
            raise ValueError('No lazy operation detected')
        lazy_state = results['lazy']
        frames = results['imgs']
        # Step 1: crop every frame to the recorded bbox.
        x1, y1, x2, y2 = lazy_state['crop_bbox'].round().astype(int)
        frames = [frame[y1:y2, x1:x2] for frame in frames]
        # Step 2: resize to the target shape.
        target_h, target_w = results['img_shape']
        interpolation = lazy_state['interpolation']
        if interpolation is None:
            interpolation = 'bilinear'
        frames = [
            mmcv.imresize(frame, (target_w, target_h),
                          interpolation=interpolation) for frame in frames
        ]
        # Step 3: flip in place if requested.
        if lazy_state['flip']:
            for frame in frames:
                mmcv.imflip_(frame, lazy_state['flip_direction'])
        results['imgs'] = frames
        del results['lazy']
        return results
@PIPELINES.register_module()
class RandomScale:
    """Resize images by a randomly selected scale.

    Required keys are "imgs", "img_shape", "modality"; added or modified
    keys are "imgs", "img_shape", "keep_ratio", "scale_factor", "lazy",
    "scale", "resize_size". Required keys in "lazy" is None, added or
    modified key is "interpolation".

    Args:
        scales (tuple[int]): Tuple of scales to be chosen for resize.
        mode (str): Selection mode for choosing the scale. Options are
            "range" and "value". With "range" the short edge is drawn
            uniformly between the minimum and maximum short edges over all
            tuples (and analogously for the long edge); with "value" one of
            the given tuples is picked as-is. Default: 'range'.
    """

    def __init__(self, scales, mode='range', **kwargs):
        warnings.warn('"RandomScale" is deprecated and will be removed in '
                      'later versions. It is currently not used in MMAction2')
        self.mode = mode
        if self.mode not in ('range', 'value'):
            raise ValueError(f"mode should be 'range' or 'value', "
                             f'but got {self.mode}')
        self.scales = scales
        self.kwargs = kwargs

    def select_scale(self, scales):
        """Pick one (long, short) scale according to ``self.mode``."""
        num_scales = len(scales)
        if num_scales == 1:
            # A single fixed scale — nothing to sample.
            return scales[0]
        if num_scales == 2:
            if self.mode == 'range':
                long_edges = [max(s) for s in scales]
                short_edges = [min(s) for s in scales]
                long_edge = np.random.randint(
                    min(long_edges),
                    max(long_edges) + 1)
                short_edge = np.random.randint(
                    min(short_edges),
                    max(short_edges) + 1)
                return (long_edge, short_edge)
            return random.choice(scales)
        if self.mode != 'value':
            raise ValueError("Only 'value' mode supports more than "
                             '2 image scales')
        return random.choice(scales)

    def __call__(self, results):
        chosen = self.select_scale(self.scales)
        results['scale'] = chosen
        return Resize(chosen, **self.kwargs)(results)

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'scales={self.scales}, mode={self.mode})')
@PIPELINES.register_module()
class RandomCrop:
    """Vanilla square random crop that specifics the output size.

    Required keys in results are "img_shape", "keypoint" (optional), "imgs"
    (optional), added or modified keys are "keypoint", "imgs", "lazy"; Required
    keys in "lazy" are "flip", "crop_bbox", added or modified key is
    "crop_bbox".

    Args:
        size (int): The output size of the images.
        lazy (bool): Determine whether to apply lazy operation. Default: False.
    """

    def __init__(self, size, lazy=False):
        if not isinstance(size, int):
            raise TypeError(f'Size must be an int, but got {type(size)}')
        self.size = size
        self.lazy = lazy

    @staticmethod
    def _crop_kps(kps, crop_bbox):
        """Shift keypoints into the cropped frame's coordinate system."""
        return kps - crop_bbox[:2]

    @staticmethod
    def _crop_imgs(imgs, crop_bbox):
        """Crop every frame to ``crop_bbox`` (x1, y1, x2, y2)."""
        x1, y1, x2, y2 = crop_bbox
        return [img[y1:y2, x1:x2] for img in imgs]

    @staticmethod
    def _box_crop(box, crop_bbox):
        """Crop the bounding boxes according to the crop_bbox.

        Args:
            box (np.ndarray): The bounding boxes.
            crop_bbox(np.ndarray): The bbox used to crop the original image.
        """
        x1, y1, x2, y2 = crop_bbox
        img_w, img_h = x2 - x1, y2 - y1
        box_ = box.copy()
        box_[..., 0::2] = np.clip(box[..., 0::2] - x1, 0, img_w - 1)
        box_[..., 1::2] = np.clip(box[..., 1::2] - y1, 0, img_h - 1)
        return box_

    def _all_box_crop(self, results, crop_bbox):
        """Crop the gt_bboxes and proposals in results according to crop_bbox.

        Args:
            results (dict): All information about the sample, which contain
                'gt_bboxes' and 'proposals' (optional).
            crop_bbox(np.ndarray): The bbox used to crop the original image.
        """
        results['gt_bboxes'] = self._box_crop(results['gt_bboxes'], crop_bbox)
        if 'proposals' in results and results['proposals'] is not None:
            assert results['proposals'].shape[1] == 4
            results['proposals'] = self._box_crop(results['proposals'],
                                                  crop_bbox)
        return results

    def __call__(self, results):
        """Performs the RandomCrop augmentation.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, self.lazy)
        if 'keypoint' in results:
            assert not self.lazy, ('Keypoint Augmentations are not compatible '
                                   'with lazy == True')
        img_h, img_w = results['img_shape']
        assert self.size <= img_h and self.size <= img_w
        y_offset = 0
        x_offset = 0
        if img_h > self.size:
            y_offset = int(np.random.randint(0, img_h - self.size))
        if img_w > self.size:
            x_offset = int(np.random.randint(0, img_w - self.size))
        if 'crop_quadruple' not in results:
            results['crop_quadruple'] = np.array(
                [0, 0, 1, 1],  # x, y, w, h
                dtype=np.float32)
        # Compose this crop with any earlier crops: the quadruple stores the
        # crop region relative to the ORIGINAL image as (x, y, w, h) ratios.
        x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
        w_ratio, h_ratio = self.size / img_w, self.size / img_h
        old_crop_quadruple = results['crop_quadruple']
        old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
        old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
        new_crop_quadruple = [
            old_x_ratio + x_ratio * old_w_ratio,
            old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            # Bug fix: the new height ratio must be scaled by the previous
            # HEIGHT ratio (was mistakenly ``h_ratio * old_x_ratio``).
            h_ratio * old_h_ratio
        ]
        results['crop_quadruple'] = np.array(
            new_crop_quadruple, dtype=np.float32)
        new_h, new_w = self.size, self.size
        crop_bbox = np.array(
            [x_offset, y_offset, x_offset + new_w, y_offset + new_h])
        results['crop_bbox'] = crop_bbox
        results['img_shape'] = (new_h, new_w)
        if not self.lazy:
            if 'keypoint' in results:
                results['keypoint'] = self._crop_kps(results['keypoint'],
                                                     crop_bbox)
            if 'imgs' in results:
                results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
        else:
            lazyop = results['lazy']
            if lazyop['flip']:
                raise NotImplementedError('Put Flip at last for now')
            # record crop_bbox in lazyop dict to ensure only crop once in Fuse
            lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
            left = x_offset * (lazy_right - lazy_left) / img_w
            right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
            top = y_offset * (lazy_bottom - lazy_top) / img_h
            bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
            lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                            (lazy_top + top),
                                            (lazy_left + right),
                                            (lazy_top + bottom)],
                                           dtype=np.float32)
        # Process entity boxes
        if 'gt_bboxes' in results:
            assert not self.lazy
            results = self._all_box_crop(results, results['crop_bbox'])
        return results

    def __repr__(self):
        repr_str = (f'{self.__class__.__name__}(size={self.size}, '
                    f'lazy={self.lazy})')
        return repr_str
@PIPELINES.register_module()
class RandomResizedCrop(RandomCrop):
    """Random crop that specifics the area and height-weight ratio range.

    Required keys in results are "img_shape", "crop_bbox", "imgs" (optional),
    "keypoint" (optional), added or modified keys are "imgs", "keypoint",
    "crop_bbox" and "lazy"; Required keys in "lazy" are "flip", "crop_bbox",
    added or modified key is "crop_bbox".

    Args:
        area_range (Tuple[float]): The candidate area scales range of
            output cropped images. Default: (0.08, 1.0).
        aspect_ratio_range (Tuple[float]): The candidate aspect ratio range of
            output cropped images. Default: (3 / 4, 4 / 3).
        lazy (bool): Determine whether to apply lazy operation. Default: False.
    """

    def __init__(self,
                 area_range=(0.08, 1.0),
                 aspect_ratio_range=(3 / 4, 4 / 3),
                 lazy=False):
        self.area_range = area_range
        self.aspect_ratio_range = aspect_ratio_range
        self.lazy = lazy
        if not mmcv.is_tuple_of(self.area_range, float):
            raise TypeError(f'Area_range must be a tuple of float, '
                            f'but got {type(area_range)}')
        if not mmcv.is_tuple_of(self.aspect_ratio_range, float):
            raise TypeError(f'Aspect_ratio_range must be a tuple of float, '
                            f'but got {type(aspect_ratio_range)}')

    @staticmethod
    def get_crop_bbox(img_shape,
                      area_range,
                      aspect_ratio_range,
                      max_attempts=10):
        """Get a crop bbox given the area range and aspect ratio range.

        Args:
            img_shape (Tuple[int]): Image shape
            area_range (Tuple[float]): The candidate area scales range of
                output cropped images. Default: (0.08, 1.0).
            aspect_ratio_range (Tuple[float]): The candidate aspect
                ratio range of output cropped images. Default: (3 / 4, 4 / 3).
            max_attempts (int): Max attempts times to generate random candidate
                bounding box. If it doesn't qualified one, the center bounding
                box will be used.

        Returns:
            (list[int]) A random crop bbox within the area range and aspect
            ratio range.
        """
        assert 0 < area_range[0] <= area_range[1] <= 1
        assert 0 < aspect_ratio_range[0] <= aspect_ratio_range[1]
        img_h, img_w = img_shape
        area = img_h * img_w
        # Sample aspect ratios log-uniformly so that a ratio and its inverse
        # are equally likely.
        min_ar, max_ar = aspect_ratio_range
        aspect_ratios = np.exp(
            np.random.uniform(
                np.log(min_ar), np.log(max_ar), size=max_attempts))
        target_areas = np.random.uniform(*area_range, size=max_attempts) * area
        candidate_crop_w = np.round(np.sqrt(target_areas *
                                            aspect_ratios)).astype(np.int32)
        candidate_crop_h = np.round(np.sqrt(target_areas /
                                            aspect_ratios)).astype(np.int32)
        for i in range(max_attempts):
            crop_w = candidate_crop_w[i]
            crop_h = candidate_crop_h[i]
            if crop_h <= img_h and crop_w <= img_w:
                x_offset = random.randint(0, img_w - crop_w)
                y_offset = random.randint(0, img_h - crop_h)
                return x_offset, y_offset, x_offset + crop_w, y_offset + crop_h
        # Fallback: no candidate fits, use the centered max square.
        crop_size = min(img_h, img_w)
        x_offset = (img_w - crop_size) // 2
        y_offset = (img_h - crop_size) // 2
        return x_offset, y_offset, x_offset + crop_size, y_offset + crop_size

    def __call__(self, results):
        """Performs the RandomResizeCrop augmentation.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, self.lazy)
        if 'keypoint' in results:
            assert not self.lazy, ('Keypoint Augmentations are not compatible '
                                   'with lazy == True')
        img_h, img_w = results['img_shape']
        left, top, right, bottom = self.get_crop_bbox(
            (img_h, img_w), self.area_range, self.aspect_ratio_range)
        new_h, new_w = bottom - top, right - left
        if 'crop_quadruple' not in results:
            results['crop_quadruple'] = np.array(
                [0, 0, 1, 1],  # x, y, w, h
                dtype=np.float32)
        # Compose this crop with any earlier crops: the quadruple stores the
        # crop region relative to the ORIGINAL image as (x, y, w, h) ratios.
        x_ratio, y_ratio = left / img_w, top / img_h
        w_ratio, h_ratio = new_w / img_w, new_h / img_h
        old_crop_quadruple = results['crop_quadruple']
        old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
        old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
        new_crop_quadruple = [
            old_x_ratio + x_ratio * old_w_ratio,
            old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            # Bug fix: the new height ratio must be scaled by the previous
            # HEIGHT ratio (was mistakenly ``h_ratio * old_x_ratio``).
            h_ratio * old_h_ratio
        ]
        results['crop_quadruple'] = np.array(
            new_crop_quadruple, dtype=np.float32)
        crop_bbox = np.array([left, top, right, bottom])
        results['crop_bbox'] = crop_bbox
        results['img_shape'] = (new_h, new_w)
        if not self.lazy:
            if 'keypoint' in results:
                results['keypoint'] = self._crop_kps(results['keypoint'],
                                                     crop_bbox)
            if 'imgs' in results:
                results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
        else:
            lazyop = results['lazy']
            if lazyop['flip']:
                raise NotImplementedError('Put Flip at last for now')
            # record crop_bbox in lazyop dict to ensure only crop once in Fuse
            lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
            left = left * (lazy_right - lazy_left) / img_w
            right = right * (lazy_right - lazy_left) / img_w
            top = top * (lazy_bottom - lazy_top) / img_h
            bottom = bottom * (lazy_bottom - lazy_top) / img_h
            lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                            (lazy_top + top),
                                            (lazy_left + right),
                                            (lazy_top + bottom)],
                                           dtype=np.float32)
        if 'gt_bboxes' in results:
            assert not self.lazy
            results = self._all_box_crop(results, results['crop_bbox'])
        return results

    def __repr__(self):
        repr_str = (f'{self.__class__.__name__}('
                    f'area_range={self.area_range}, '
                    f'aspect_ratio_range={self.aspect_ratio_range}, '
                    f'lazy={self.lazy})')
        return repr_str
@PIPELINES.register_module()
class MultiScaleCrop(RandomCrop):
"""Crop images with a list of randomly selected scales.
Randomly select the w and h scales from a list of scales. Scale of 1 means
the base size, which is the minimal of image width and height. The scale
level of w and h is controlled to be smaller than a certain value to
prevent too large or small aspect ratio.
Required keys are "img_shape", "imgs" (optional), "keypoint" (optional),
added or modified keys are "imgs", "crop_bbox", "img_shape", "lazy" and
"scales". Required keys in "lazy" are "crop_bbox", added or modified key is
"crop_bbox".
Args:
input_size (int | tuple[int]): (w, h) of network input.
scales (tuple[float]): width and height scales to be selected.
max_wh_scale_gap (int): Maximum gap of w and h scale levels.
Default: 1.
random_crop (bool): If set to True, the cropping bbox will be randomly
sampled, otherwise it will be sampler from fixed regions.
Default: False.
num_fixed_crops (int): If set to 5, the cropping bbox will keep 5
basic fixed regions: "upper left", "upper right", "lower left",
"lower right", "center". If set to 13, the cropping bbox will
append another 8 fix regions: "center left", "center right",
"lower center", "upper center", "upper left quarter",
"upper right quarter", "lower left quarter", "lower right quarter".
Default: 5.
lazy (bool): Determine whether to apply lazy operation. Default: False.
"""
def __init__(self,
input_size,
scales=(1, ),
max_wh_scale_gap=1,
random_crop=False,
num_fixed_crops=5,
lazy=False):
self.input_size = _pair(input_size)
if not mmcv.is_tuple_of(self.input_size, int):
raise TypeError(f'Input_size must be int or tuple of int, '
f'but got {type(input_size)}')
if not isinstance(scales, tuple):
raise TypeError(f'Scales must be tuple, but got {type(scales)}')
if num_fixed_crops not in [5, 13]:
raise ValueError(f'Num_fix_crops must be in {[5, 13]}, '
f'but got {num_fixed_crops}')
self.scales = scales
self.max_wh_scale_gap = max_wh_scale_gap
self.random_crop = random_crop
self.num_fixed_crops = num_fixed_crops
self.lazy = lazy
def __call__(self, results):
"""Performs the MultiScaleCrop augmentation.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
_init_lazy_if_proper(results, self.lazy)
if 'keypoint' in results:
assert not self.lazy, ('Keypoint Augmentations are not compatible '
'with lazy == True')
img_h, img_w = results['img_shape']
base_size = min(img_h, img_w)
crop_sizes = [int(base_size * s) for s in self.scales]
candidate_sizes = []
for i, h in enumerate(crop_sizes):
for j, w in enumerate(crop_sizes):
if abs(i - j) <= self.max_wh_scale_gap:
candidate_sizes.append([w, h])
crop_size = random.choice(candidate_sizes)
for i in range(2):
if abs(crop_size[i] - self.input_size[i]) < 3:
crop_size[i] = self.input_size[i]
crop_w, crop_h = crop_size
if self.random_crop:
x_offset = random.randint(0, img_w - crop_w)
y_offset = random.randint(0, img_h - crop_h)
else:
w_step = (img_w - crop_w) // 4
h_step = (img_h - crop_h) // 4
candidate_offsets = [
(0, 0), # upper left
(4 * w_step, 0), # upper right
(0, 4 * h_step), # lower left
(4 * w_step, 4 * h_step), # lower right
(2 * w_step, 2 * h_step), # center
]
if self.num_fixed_crops == 13:
extra_candidate_offsets = [
(0, 2 * h_step), # center left
(4 * w_step, 2 * h_step), # center right
(2 * w_step, 4 * h_step), # lower center
(2 * w_step, 0 * h_step), # upper center
(1 * w_step, 1 * h_step), # upper left quarter
(3 * w_step, 1 * h_step), # upper right quarter
(1 * w_step, 3 * h_step), # lower left quarter
(3 * w_step, 3 * h_step) # lower right quarter
]
candidate_offsets.extend(extra_candidate_offsets)
x_offset, y_offset = random.choice(candidate_offsets)
new_h, new_w = crop_h, crop_w
crop_bbox = np.array(
[x_offset, y_offset, x_offset + new_w, y_offset + new_h])
results['crop_bbox'] = crop_bbox
results['img_shape'] = (new_h, new_w)
results['scales'] = self.scales
if 'crop_quadruple' not in results:
results['crop_quadruple'] = np.array(
[0, 0, 1, 1], # x, y, w, h
dtype=np.float32)
x_ratio, y_ratio = x_offset / img_w, y_offset / img_h
w_ratio, h_ratio = new_w / img_w, new_h / img_h
old_crop_quadruple = results['crop_quadruple']
old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
new_crop_quadruple = [
old_x_ratio + x_ratio * old_w_ratio,
old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
h_ratio * old_x_ratio
]
results['crop_quadruple'] = np.array(
new_crop_quadruple, dtype=np.float32)
if not self.lazy:
if 'keypoint' in results:
results['keypoint'] = self._crop_kps(results['keypoint'],
crop_bbox)
if 'imgs' in results:
results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
else:
lazyop = results['lazy']
if lazyop['flip']:
raise NotImplementedError('Put Flip at last for now')
# record crop_bbox in lazyop dict to ensure only crop once in Fuse
lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
left = x_offset * (lazy_right - lazy_left) / img_w
right = (x_offset + new_w) * (lazy_right - lazy_left) / img_w
top = y_offset * (lazy_bottom - lazy_top) / img_h
bottom = (y_offset + new_h) * (lazy_bottom - lazy_top) / img_h
lazyop['crop_bbox'] = np.array([(lazy_left + left),
(lazy_top + top),
(lazy_left + right),
(lazy_top + bottom)],
dtype=np.float32)
if 'gt_bboxes' in results:
assert not self.lazy
results = self._all_box_crop(results, results['crop_bbox'])
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'input_size={self.input_size}, scales={self.scales}, '
f'max_wh_scale_gap={self.max_wh_scale_gap}, '
f'random_crop={self.random_crop}, '
f'num_fixed_crops={self.num_fixed_crops}, '
f'lazy={self.lazy})')
return repr_str
@PIPELINES.register_module()
class Resize:
    """Resize images to a specific size.
    Required keys are "img_shape", "modality", "imgs" (optional), "keypoint"
    (optional), added or modified keys are "imgs", "img_shape", "keep_ratio",
    "scale_factor", "lazy", "resize_size". Required keys in "lazy" is None,
    added or modified key is "interpolation".
    Args:
        scale (float | Tuple[int]): If keep_ratio is True, it serves as scaling
            factor or maximum size:
            If it is a float number, the image will be rescaled by this
            factor, else if it is a tuple of 2 integers, the image will
            be rescaled as large as possible within the scale.
            Otherwise, it serves as (w, h) of output size.
        keep_ratio (bool): If set to True, Images will be resized without
            changing the aspect ratio. Otherwise, it will resize images to a
            given size. Default: True.
        interpolation (str): Algorithm used for interpolation:
            "nearest" | "bilinear". Default: "bilinear".
        lazy (bool): Determine whether to apply lazy operation. Default: False.
    """
    def __init__(self,
                 scale,
                 keep_ratio=True,
                 interpolation='bilinear',
                 lazy=False):
        if isinstance(scale, float):
            if scale <= 0:
                raise ValueError(f'Invalid scale {scale}, must be positive.')
        elif isinstance(scale, tuple):
            max_long_edge = max(scale)
            max_short_edge = min(scale)
            if max_short_edge == -1:
                # assign np.inf to long edge for rescaling short edge later.
                scale = (np.inf, max_long_edge)
        else:
            raise TypeError(
                f'Scale must be float or tuple of int, but got {type(scale)}')
        self.scale = scale
        self.keep_ratio = keep_ratio
        self.interpolation = interpolation
        self.lazy = lazy
    def _resize_imgs(self, imgs, new_w, new_h):
        # Resize every frame to (new_w, new_h) with the configured
        # interpolation algorithm.
        return [
            mmcv.imresize(
                img, (new_w, new_h), interpolation=self.interpolation)
            for img in imgs
        ]
    @staticmethod
    def _resize_kps(kps, scale_factor):
        # Keypoints are pixel coordinates; rescaling is a plain multiply
        # by the (w, h) scale factor.
        return kps * scale_factor
    @staticmethod
    def _box_resize(box, scale_factor):
        """Rescale the bounding boxes according to the scale_factor.
        Args:
            box (np.ndarray): The bounding boxes.
            scale_factor (np.ndarray): The scale factor used for rescaling.
        """
        assert len(scale_factor) == 2
        # Boxes are (x1, y1, x2, y2); repeat (w, h) so all 4 coords scale.
        scale_factor = np.concatenate([scale_factor, scale_factor])
        return box * scale_factor
    def __call__(self, results):
        """Performs the Resize augmentation.
        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, self.lazy)
        if 'keypoint' in results:
            assert not self.lazy, ('Keypoint Augmentations are not compatible '
                                   'with lazy == True')
        if 'scale_factor' not in results:
            results['scale_factor'] = np.array([1, 1], dtype=np.float32)
        img_h, img_w = results['img_shape']
        if self.keep_ratio:
            new_w, new_h = mmcv.rescale_size((img_w, img_h), self.scale)
        else:
            new_w, new_h = self.scale
        # NOTE(review): this stores per-call state on the transform object
        # itself; fine for single-threaded pipelines but not re-entrant.
        self.scale_factor = np.array([new_w / img_w, new_h / img_h],
                                     dtype=np.float32)
        results['img_shape'] = (new_h, new_w)
        results['keep_ratio'] = self.keep_ratio
        # Accumulate with any scale factor applied by earlier transforms.
        results['scale_factor'] = results['scale_factor'] * self.scale_factor
        if not self.lazy:
            if 'imgs' in results:
                results['imgs'] = self._resize_imgs(results['imgs'], new_w,
                                                    new_h)
            if 'keypoint' in results:
                results['keypoint'] = self._resize_kps(results['keypoint'],
                                                       self.scale_factor)
        else:
            lazyop = results['lazy']
            if lazyop['flip']:
                raise NotImplementedError('Put Flip at last for now')
            lazyop['interpolation'] = self.interpolation
        if 'gt_bboxes' in results:
            assert not self.lazy
            results['gt_bboxes'] = self._box_resize(results['gt_bboxes'],
                                                    self.scale_factor)
            if 'proposals' in results and results['proposals'] is not None:
                assert results['proposals'].shape[1] == 4
                results['proposals'] = self._box_resize(
                    results['proposals'], self.scale_factor)
        return results
    def __repr__(self):
        repr_str = (f'{self.__class__.__name__}('
                    f'scale={self.scale}, keep_ratio={self.keep_ratio}, '
                    f'interpolation={self.interpolation}, '
                    f'lazy={self.lazy})')
        return repr_str
@PIPELINES.register_module()
class RandomRescale:
    """Rescale frames so the short edge lands at a random target length.

    The target is drawn uniformly (inclusive) from ``scale_range`` and the
    aspect ratio is preserved.

    Required keys are "imgs", "img_shape", "modality", added or modified
    keys are "imgs", "img_shape", "keep_ratio", "scale_factor",
    "resize_size", "short_edge".

    Args:
        scale_range (tuple[int]): The range of short edge length. A closed
            interval.
        interpolation (str): Algorithm used for interpolation:
            "nearest" | "bilinear". Default: "bilinear".
    """

    def __init__(self, scale_range, interpolation='bilinear'):
        self.scale_range = scale_range
        # Sanity-check the range: a pair of positive ints with low < high.
        assert mmcv.is_tuple_of(scale_range, int)
        assert len(scale_range) == 2
        assert scale_range[0] < scale_range[1]
        assert np.all([x > 0 for x in scale_range])
        self.keep_ratio = True
        self.interpolation = interpolation

    def __call__(self, results):
        """Draw a short-edge target and delegate the work to ``Resize``.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        low, high = self.scale_range
        short_edge = np.random.randint(low, high + 1)
        resize = Resize((-1, short_edge),
                        keep_ratio=True,
                        interpolation=self.interpolation,
                        lazy=False)
        results = resize(results)
        results['short_edge'] = short_edge
        return results

    def __repr__(self):
        low, high = self.scale_range
        return (f'{self.__class__.__name__}('
                f'scale_range=({low}, {high}), '
                f'interpolation={self.interpolation})')
@PIPELINES.register_module()
class Flip:
    """Flip the input images with a probability.
    Reverse the order of elements in the given imgs with a specific direction.
    The shape of the imgs is preserved, but the elements are reordered.
    Required keys are "img_shape", "modality", "imgs" (optional), "keypoint"
    (optional), added or modified keys are "imgs", "keypoint", "lazy" and
    "flip_direction". Required keys in "lazy" is None, added or modified key
    are "flip" and "flip_direction". The Flip augmentation should be placed
    after any cropping / reshaping augmentations, to make sure crop_quadruple
    is calculated properly.
    Args:
        flip_ratio (float): Probability of implementing flip. Default: 0.5.
        direction (str): Flip imgs horizontally or vertically. Options are
            "horizontal" | "vertical". Default: "horizontal".
        flip_label_map (Dict[int, int] | None): Transform the label of the
            flipped image with the specific label. Default: None.
        left_kp (list[int]): Indexes of left keypoints, used to flip keypoints.
            Default: None.
        right_kp (list[ind]): Indexes of right keypoints, used to flip
            keypoints. Default: None.
        lazy (bool): Determine whether to apply lazy operation. Default: False.
    """
    # Supported flip directions; validated in __init__.
    _directions = ['horizontal', 'vertical']
    def __init__(self,
                 flip_ratio=0.5,
                 direction='horizontal',
                 flip_label_map=None,
                 left_kp=None,
                 right_kp=None,
                 lazy=False):
        if direction not in self._directions:
            raise ValueError(f'Direction {direction} is not supported. '
                             f'Currently support ones are {self._directions}')
        self.flip_ratio = flip_ratio
        self.direction = direction
        self.flip_label_map = flip_label_map
        self.left_kp = left_kp
        self.right_kp = right_kp
        self.lazy = lazy
    def _flip_imgs(self, imgs, modality):
        """Flip every frame in place; invert x-flow frames for Flow input."""
        _ = [mmcv.imflip_(img, self.direction) for img in imgs]
        lt = len(imgs)
        if modality == 'Flow':
            # The 1st frame of each 2 frames is flow-x
            for i in range(0, lt, 2):
                # A horizontal flip negates the x flow component; inverting
                # the image encodes that negation.
                imgs[i] = mmcv.iminvert(imgs[i])
        return imgs
    def _flip_kps(self, kps, kpscores, img_width):
        """Mirror keypoint x-coordinates and swap left/right channels."""
        kp_x = kps[..., 0]
        # Zero x-coordinates are left untouched — NOTE(review): presumably
        # zero marks a missing keypoint; confirm with the dataset format.
        kp_x[kp_x != 0] = img_width - kp_x[kp_x != 0]
        new_order = list(range(kps.shape[2]))
        if self.left_kp is not None and self.right_kp is not None:
            # Swap channels so e.g. "left wrist" still refers to the
            # anatomical left after mirroring.
            for left, right in zip(self.left_kp, self.right_kp):
                new_order[left] = right
                new_order[right] = left
        kps = kps[:, :, new_order]
        if kpscores is not None:
            kpscores = kpscores[:, :, new_order]
        return kps, kpscores
    @staticmethod
    def _box_flip(box, img_width):
        """Flip the bounding boxes given the width of the image.
        Args:
            box (np.ndarray): The bounding boxes.
            img_width (int): The img width.
        """
        box_ = box.copy()
        # x1/x2 swap under mirroring; step-4 slices handle layouts with
        # several boxes flattened along the last axis.
        box_[..., 0::4] = img_width - box[..., 2::4]
        box_[..., 2::4] = img_width - box[..., 0::4]
        return box_
    def __call__(self, results):
        """Performs the Flip augmentation.
        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, self.lazy)
        if 'keypoint' in results:
            assert not self.lazy, ('Keypoint Augmentations are not compatible '
                                   'with lazy == True')
            assert self.direction == 'horizontal', (
                'Only horizontal flips are'
                'supported for human keypoints')
        modality = results['modality']
        if modality == 'Flow':
            # Flow-x inversion in _flip_imgs is only defined for
            # horizontal flips.
            assert self.direction == 'horizontal'
        flip = np.random.rand() < self.flip_ratio
        results['flip'] = flip
        results['flip_direction'] = self.direction
        img_width = results['img_shape'][1]
        # Some labels change meaning when mirrored; remap them if a map
        # was provided.
        if self.flip_label_map is not None and flip:
            results['label'] = self.flip_label_map.get(results['label'],
                                                       results['label'])
        if not self.lazy:
            if flip:
                if 'imgs' in results:
                    results['imgs'] = self._flip_imgs(results['imgs'],
                                                      modality)
                if 'keypoint' in results:
                    kp = results['keypoint']
                    kpscore = results.get('keypoint_score', None)
                    kp, kpscore = self._flip_kps(kp, kpscore, img_width)
                    results['keypoint'] = kp
                    if 'keypoint_score' in results:
                        results['keypoint_score'] = kpscore
        else:
            lazyop = results['lazy']
            if lazyop['flip']:
                raise NotImplementedError('Use one Flip please')
            lazyop['flip'] = flip
            lazyop['flip_direction'] = self.direction
        if 'gt_bboxes' in results and flip:
            assert not self.lazy and self.direction == 'horizontal'
            width = results['img_shape'][1]
            results['gt_bboxes'] = self._box_flip(results['gt_bboxes'], width)
            if 'proposals' in results and results['proposals'] is not None:
                assert results['proposals'].shape[1] == 4
                results['proposals'] = self._box_flip(results['proposals'],
                                                      width)
        return results
    def __repr__(self):
        repr_str = (
            f'{self.__class__.__name__}('
            f'flip_ratio={self.flip_ratio}, direction={self.direction}, '
            f'flip_label_map={self.flip_label_map}, lazy={self.lazy})')
        return repr_str
@PIPELINES.register_module()
class Normalize:
    """Normalize images with the given mean and std value.

    Required keys are "imgs", "img_shape", "modality", added or modified
    keys are "imgs" and "img_norm_cfg". If modality is 'Flow', additional
    keys "scale_factor" is required.

    Args:
        mean (Sequence[float] | np.ndarray): Mean values of different channels.
        std (Sequence[float] | np.ndarray): Std values of different channels.
        to_bgr (bool): Whether to convert channels from RGB to BGR.
            Default: False.
        adjust_magnitude (bool): Indicate whether to adjust the flow magnitude
            on 'scale_factor' when modality is 'Flow'. Default: False.
    """
    def __init__(self, mean, std, to_bgr=False, adjust_magnitude=False):
        # Bugfix: the error message always promised np.ndarray support, but
        # np.ndarray is not a collections.abc.Sequence, so ndarray inputs
        # were rejected. Accept them explicitly.
        if not isinstance(mean, (Sequence, np.ndarray)):
            raise TypeError(
                f'Mean must be list, tuple or np.ndarray, but got {type(mean)}'
            )
        if not isinstance(std, (Sequence, np.ndarray)):
            raise TypeError(
                f'Std must be list, tuple or np.ndarray, but got {type(std)}')
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_bgr = to_bgr
        self.adjust_magnitude = adjust_magnitude
    def __call__(self, results):
        """Normalize the frames according to their modality.

        Replaces 'imgs' with a single float32 array and records the
        normalization parameters under 'img_norm_cfg'.
        """
        modality = results['modality']
        if modality == 'RGB':
            n = len(results['imgs'])
            h, w, c = results['imgs'][0].shape
            imgs = np.empty((n, h, w, c), dtype=np.float32)
            for i, img in enumerate(results['imgs']):
                imgs[i] = img
            # mmcv normalizes each frame in place.
            for img in imgs:
                mmcv.imnormalize_(img, self.mean, self.std, self.to_bgr)
            results['imgs'] = imgs
            results['img_norm_cfg'] = dict(
                mean=self.mean, std=self.std, to_bgr=self.to_bgr)
            return results
        if modality == 'Flow':
            num_imgs = len(results['imgs'])
            # Frames alternate x-flow / y-flow, so mean/std hold one value
            # per flow component.
            assert num_imgs % 2 == 0
            assert self.mean.shape[0] == 2
            assert self.std.shape[0] == 2
            n = num_imgs // 2
            h, w = results['imgs'][0].shape
            x_flow = np.empty((n, h, w), dtype=np.float32)
            y_flow = np.empty((n, h, w), dtype=np.float32)
            for i in range(n):
                x_flow[i] = results['imgs'][2 * i]
                y_flow[i] = results['imgs'][2 * i + 1]
            x_flow = (x_flow - self.mean[0]) / self.std[0]
            y_flow = (y_flow - self.mean[1]) / self.std[1]
            if self.adjust_magnitude:
                # Resizing rescales flow vectors; compensate with the
                # accumulated scale_factor.
                x_flow = x_flow * results['scale_factor'][0]
                y_flow = y_flow * results['scale_factor'][1]
            imgs = np.stack([x_flow, y_flow], axis=-1)
            results['imgs'] = imgs
            args = dict(
                mean=self.mean,
                std=self.std,
                to_bgr=self.to_bgr,
                adjust_magnitude=self.adjust_magnitude)
            results['img_norm_cfg'] = args
            return results
        raise NotImplementedError
    def __repr__(self):
        repr_str = (f'{self.__class__.__name__}('
                    f'mean={self.mean}, '
                    f'std={self.std}, '
                    f'to_bgr={self.to_bgr}, '
                    f'adjust_magnitude={self.adjust_magnitude})')
        return repr_str
@PIPELINES.register_module()
class CenterCrop(RandomCrop):
    """Crop the center area from images.

    Required keys are "img_shape", "imgs" (optional), "keypoint" (optional),
    added or modified keys are "imgs", "keypoint", "crop_bbox", "lazy" and
    "img_shape". Required keys in "lazy" is "crop_bbox", added or modified key
    is "crop_bbox".

    Args:
        crop_size (int | tuple[int]): (w, h) of crop size.
        lazy (bool): Determine whether to apply lazy operation. Default: False.
    """
    def __init__(self, crop_size, lazy=False):
        self.crop_size = _pair(crop_size)
        self.lazy = lazy
        if not mmcv.is_tuple_of(self.crop_size, int):
            raise TypeError(f'Crop_size must be int or tuple of int, '
                            f'but got {type(crop_size)}')
    def __call__(self, results):
        """Performs the CenterCrop augmentation.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, self.lazy)
        if 'keypoint' in results:
            assert not self.lazy, ('Keypoint Augmentations are not compatible '
                                   'with lazy == True')
        img_h, img_w = results['img_shape']
        crop_w, crop_h = self.crop_size
        # Center the crop window inside the frame.
        left = (img_w - crop_w) // 2
        top = (img_h - crop_h) // 2
        right = left + crop_w
        bottom = top + crop_h
        new_h, new_w = bottom - top, right - left
        crop_bbox = np.array([left, top, right, bottom])
        results['crop_bbox'] = crop_bbox
        results['img_shape'] = (new_h, new_w)
        if 'crop_quadruple' not in results:
            results['crop_quadruple'] = np.array(
                [0, 0, 1, 1],  # x, y, w, h
                dtype=np.float32)
        x_ratio, y_ratio = left / img_w, top / img_h
        w_ratio, h_ratio = new_w / img_w, new_h / img_h
        old_crop_quadruple = results['crop_quadruple']
        old_x_ratio, old_y_ratio = old_crop_quadruple[0], old_crop_quadruple[1]
        old_w_ratio, old_h_ratio = old_crop_quadruple[2], old_crop_quadruple[3]
        # Compose this crop with any previous crop; all ratios are relative
        # to the original image. Bugfix: the height term must scale by
        # old_h_ratio (was erroneously old_x_ratio).
        new_crop_quadruple = [
            old_x_ratio + x_ratio * old_w_ratio,
            old_y_ratio + y_ratio * old_h_ratio, w_ratio * old_w_ratio,
            h_ratio * old_h_ratio
        ]
        results['crop_quadruple'] = np.array(
            new_crop_quadruple, dtype=np.float32)
        if not self.lazy:
            if 'keypoint' in results:
                results['keypoint'] = self._crop_kps(results['keypoint'],
                                                     crop_bbox)
            if 'imgs' in results:
                results['imgs'] = self._crop_imgs(results['imgs'], crop_bbox)
        else:
            lazyop = results['lazy']
            if lazyop['flip']:
                raise NotImplementedError('Put Flip at last for now')
            # record crop_bbox in lazyop dict to ensure only crop once in Fuse
            lazy_left, lazy_top, lazy_right, lazy_bottom = lazyop['crop_bbox']
            left = left * (lazy_right - lazy_left) / img_w
            right = right * (lazy_right - lazy_left) / img_w
            top = top * (lazy_bottom - lazy_top) / img_h
            bottom = bottom * (lazy_bottom - lazy_top) / img_h
            lazyop['crop_bbox'] = np.array([(lazy_left + left),
                                            (lazy_top + top),
                                            (lazy_left + right),
                                            (lazy_top + bottom)],
                                           dtype=np.float32)
        if 'gt_bboxes' in results:
            assert not self.lazy
            results = self._all_box_crop(results, results['crop_bbox'])
        return results
    def __repr__(self):
        repr_str = (f'{self.__class__.__name__}(crop_size={self.crop_size}, '
                    f'lazy={self.lazy})')
        return repr_str
@PIPELINES.register_module()
class ThreeCrop:
    """Crop images into three crops.

    The crops are placed with equal intervals along the dimension that is
    longer than the crop; the other dimension must match the frame exactly.

    Required keys are "imgs", "img_shape", added or modified keys are
    "imgs", "crop_bbox" and "img_shape".

    Args:
        crop_size(int | tuple[int]): (w, h) of crop size.
    """

    def __init__(self, crop_size):
        self.crop_size = _pair(crop_size)
        if not mmcv.is_tuple_of(self.crop_size, int):
            raise TypeError(f'Crop_size must be int or tuple of int, '
                            f'but got {type(crop_size)}')

    def __call__(self, results):
        """Performs the ThreeCrop augmentation.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, False)
        if 'gt_bboxes' in results or 'proposals' in results:
            warnings.warn('ThreeCrop cannot process bounding boxes')

        frames = results['imgs']
        img_h, img_w = frames[0].shape[:2]
        crop_w, crop_h = self.crop_size
        # One spatial dimension must match the frame; crops slide along
        # the other one.
        assert crop_h == img_h or crop_w == img_w

        if crop_h == img_h:
            step = (img_w - crop_w) // 2
            offsets = [(0, 0), (2 * step, 0), (step, 0)]  # left/right/middle
        elif crop_w == img_w:
            step = (img_h - crop_h) // 2
            offsets = [(0, 0), (0, 2 * step), (0, step)]  # top/down/middle

        all_crops = []
        all_bboxes = []
        for x0, y0 in offsets:
            box = [x0, y0, x0 + crop_w, y0 + crop_h]
            for frame in frames:
                all_crops.append(frame[y0:y0 + crop_h, x0:x0 + crop_w])
                all_bboxes.append(box)

        results['imgs'] = all_crops
        results['crop_bbox'] = np.array(all_bboxes)
        results['img_shape'] = results['imgs'][0].shape[:2]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(crop_size={self.crop_size})'
@PIPELINES.register_module()
class TenCrop:
    """Crop the images into 10 crops (corner + center + flip).

    Crop the four corners and the center part of the image with the same
    given crop_size, and pair each crop with its horizontal mirror.

    Required keys are "imgs", "img_shape", added or modified keys are
    "imgs", "crop_bbox" and "img_shape".

    Args:
        crop_size(int | tuple[int]): (w, h) of crop size.
    """

    def __init__(self, crop_size):
        self.crop_size = _pair(crop_size)
        if not mmcv.is_tuple_of(self.crop_size, int):
            raise TypeError(f'Crop_size must be int or tuple of int, '
                            f'but got {type(crop_size)}')

    def __call__(self, results):
        """Performs the TenCrop augmentation.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        _init_lazy_if_proper(results, False)
        if 'gt_bboxes' in results or 'proposals' in results:
            warnings.warn('TenCrop cannot process bounding boxes')

        frames = results['imgs']
        img_h, img_w = frames[0].shape[:2]
        crop_w, crop_h = self.crop_size

        w_step = (img_w - crop_w) // 4
        h_step = (img_h - crop_h) // 4
        offsets = [
            (0, 0),  # upper left
            (4 * w_step, 0),  # upper right
            (0, 4 * h_step),  # lower left
            (4 * w_step, 4 * h_step),  # lower right
            (2 * w_step, 2 * h_step),  # center
        ]

        all_crops = []
        all_bboxes = []
        for x0, y0 in offsets:
            crops = [
                frame[y0:y0 + crop_h, x0:x0 + crop_w] for frame in frames
            ]
            # Every positional crop is followed by its horizontal mirror.
            mirrored = [np.flip(c, axis=1).copy() for c in crops]
            box = [x0, y0, x0 + crop_w, y0 + crop_h]
            all_crops.extend(crops)
            all_crops.extend(mirrored)
            all_bboxes.extend(box for _ in range(len(frames) * 2))

        results['imgs'] = all_crops
        results['crop_bbox'] = np.array(all_bboxes)
        results['img_shape'] = results['imgs'][0].shape[:2]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(crop_size={self.crop_size})'
@PIPELINES.register_module()
class MultiGroupCrop:
    """Randomly crop the images into several groups.

    Each group draws one random crop window of the same given crop_size
    and applies it to every frame.

    Required keys are "imgs", added or modified keys are "imgs", "crop_bbox"
    and "img_shape".

    Args:
        crop_size(int | tuple[int]): (w, h) of crop size.
        groups(int): Number of groups.
    """

    def __init__(self, crop_size, groups):
        self.crop_size = _pair(crop_size)
        self.groups = groups
        if not mmcv.is_tuple_of(self.crop_size, int):
            raise TypeError('Crop size must be int or tuple of int, '
                            f'but got {type(crop_size)}')
        if not isinstance(groups, int):
            raise TypeError(f'Groups must be int, but got {type(groups)}.')
        if groups <= 0:
            raise ValueError('Groups must be positive.')

    def __call__(self, results):
        """Performs the MultiGroupCrop augmentation.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        if 'gt_bboxes' in results or 'proposals' in results:
            warnings.warn('MultiGroupCrop cannot process bounding boxes')

        frames = results['imgs']
        img_h, img_w = frames[0].shape[:2]
        crop_w, crop_h = self.crop_size

        all_crops = []
        all_bboxes = []
        for _ in range(self.groups):
            # random.randint is inclusive on both ends.
            x0 = random.randint(0, img_w - crop_w)
            y0 = random.randint(0, img_h - crop_h)
            box = [x0, y0, x0 + crop_w, y0 + crop_h]
            all_crops.extend(
                frame[y0:y0 + crop_h, x0:x0 + crop_w] for frame in frames)
            all_bboxes.extend(box for _ in frames)

        results['imgs'] = all_crops
        results['crop_bbox'] = np.array(all_bboxes)
        results['img_shape'] = results['imgs'][0].shape[:2]
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(crop_size={self.crop_size}, '
                f'groups={self.groups})')
@PIPELINES.register_module()
class ColorJitter:
    """Randomly jitter brightness/contrast/saturation/hue of every frame.

    With probability ``p`` a single torchvision ColorJitter transform is
    applied to each frame of ``results['imgs']`` independently.
    (``p_gray`` is stored but not used by this transform.)
    """

    def __init__(self, p=0.8, p_gray=0.2, brightness=0.4, contrast=0.4,
                 saturation=0.2, hue=0.1):
        self.p = p
        self.p_gray = p_gray
        self.worker = torchvision.transforms.ColorJitter(
            brightness=brightness, contrast=contrast,
            saturation=saturation, hue=hue)

    def __call__(self, results):
        if random.random() < self.p:
            results['imgs'] = [
                np.asarray(self.worker(Image.fromarray(frame)))
                for frame in results['imgs']
            ]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}'
@PIPELINES.register_module()
class GrayScale:
    """Randomly convert all frames to 3-channel grayscale.

    With probability ``p`` every frame in ``results['imgs']`` is replaced
    by its grayscale version (still 3 channels, so shapes are preserved).
    """

    def __init__(self, p=0.2):
        self.p = p
        self.worker_gray = torchvision.transforms.Grayscale(
            num_output_channels=3)

    def __call__(self, results):
        if random.random() < self.p:
            results['imgs'] = [
                np.asarray(self.worker_gray(Image.fromarray(frame)))
                for frame in results['imgs']
            ]
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}'
@PIPELINES.register_module()
class Compose:
    """Compose a sequence of data transforms into one callable.

    Each element of ``transforms`` may be a config dict (instantiated via
    the PIPELINES registry) or any callable. Calling the composition runs
    the transforms in order; if any of them returns None the chain stops
    and None is returned.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, Sequence)
        self.transforms = []
        for item in transforms:
            if isinstance(item, dict):
                self.transforms.append(build_from_cfg(item, PIPELINES))
            elif callable(item):
                self.transforms.append(item)
            else:
                raise TypeError(f'transform must be callable or a dict, '
                                f'but got {type(item)}')

    def __call__(self, data):
        """Apply each transform in order, short-circuiting on None."""
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        body = ''.join(f'\n    {transform}' for transform in self.transforms)
        return f'{self.__class__.__name__}({body}\n)'
@PIPELINES.register_module()
class DecordInit:
    """Using decord to initialize the video_reader.

    Decord: https://github.com/dmlc/decord

    Required keys are "filename" and "tar", added or modified keys are
    "video_reader" and "total_frames".
    """

    def __init__(self, io_backend='disk', num_threads=1, **kwargs):
        self.io_backend = io_backend
        self.num_threads = num_threads
        self.kwargs = kwargs
        # Created lazily on first use and reused for subsequent samples.
        self.file_client = None
        self.tarfile = None

    def __call__(self, results):
        """Perform the Decord initialization.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        try:
            import decord
        except ImportError:
            raise ImportError(
                'Please run "pip install decord" to install Decord first.')

        if results['tar'] is False:
            # Plain video file: fetch the raw bytes through a FileClient.
            if self.file_client is None:
                self.file_client = FileClient(self.io_backend, **self.kwargs)
            file_obj = io.BytesIO(self.file_client.get(results['filename']))
        else:
            # Video stored inside a tar archive named after its directory.
            if self.tarfile is None:
                data_root = os.path.dirname(results['filename']) + '.tar'
                self.tarfile = tarfile.open(data_root)
            video_name = results['filename'].split('/')[-1]
            member = self.tarfile.extractfile(video_name)
            file_obj = io.BytesIO(member.read())

        container = decord.VideoReader(file_obj, num_threads=self.num_threads)
        results['video_reader'] = container
        results['total_frames'] = len(container)
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'io_backend={self.io_backend}, '
                f'num_threads={self.num_threads})')
@PIPELINES.register_module()
class DecordDecode:
    """Using decord to decode the video.

    Decord: https://github.com/dmlc/decord

    Required keys are "video_reader", "filename" and "frame_inds",
    added or modified keys are "imgs" and "original_shape".
    """

    def __call__(self, results):
        """Perform the Decord decoding.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        reader = results['video_reader']

        if results['frame_inds'].ndim != 1:
            results['frame_inds'] = np.squeeze(results['frame_inds'])
        frame_inds = results['frame_inds']

        # Decode every distinct frame exactly once, then gather in order.
        decoded = {idx: reader[idx].asnumpy() for idx in np.unique(frame_inds)}
        imgs = [decoded[idx] for idx in frame_inds]

        # Drop the reader so it is not carried through the pipeline.
        results['video_reader'] = None
        del reader

        results['imgs'] = imgs
        results['original_shape'] = imgs[0].shape[:2]
        results['img_shape'] = imgs[0].shape[:2]
        return results
@PIPELINES.register_module()
class SampleFrames:
    """Sample frames from the video.

    Required keys are "total_frames", "start_index", added or modified keys
    are "frame_inds", "frame_interval" and "num_clips".

    Args:
        clip_len (int): Frames of each sampled output clip.
        frame_interval (int): Temporal interval of adjacent sampled frames.
            Default: 1.
        num_clips (int): Number of clips to be sampled. Default: 1.
        temporal_jitter (bool): Whether to apply temporal jittering.
            Default: False.
        twice_sample (bool): Whether to use twice sample when testing.
            If set to True, it will sample frames with and without fixed shift,
            which is commonly used for testing in TSM model. Default: False.
        out_of_bound_opt (str): The way to deal with out of bounds frame
            indexes. Available options are 'loop', 'repeat_last'.
            Default: 'loop'.
        test_mode (bool): Store True when building test or validation dataset.
            Default: False.
        start_index (None): This argument is deprecated and moved to dataset
            class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc),
            see this: https://github.com/open-mmlab/mmaction2/pull/89.
        frame_uniform (bool): Stored flag; presumably selects the
            ``get_seq_frames`` uniform-segment sampling path — the dispatch
            happens outside this chunk, confirm before relying on it.
            Default: False.
        multiview (int): Number of independent clip-offset sets drawn per
            video during training (see ``_sample_clips``). Default: 1.
    """
def __init__(self,
clip_len,
frame_interval=1,
num_clips=1,
temporal_jitter=False,
twice_sample=False,
out_of_bound_opt='loop',
test_mode=False,
start_index=None,
frame_uniform=False,
multiview=1):
self.clip_len = clip_len
self.frame_interval = frame_interval
self.num_clips = num_clips
self.temporal_jitter = temporal_jitter
self.twice_sample = twice_sample
self.out_of_bound_opt = out_of_bound_opt
self.test_mode = test_mode
self.frame_uniform = frame_uniform
self.multiview=multiview
assert self.out_of_bound_opt in ['loop', 'repeat_last']
if start_index is not None:
warnings.warn('No longer support "start_index" in "SampleFrames", '
'it should be set in dataset class, see this pr: '
'https://github.com/open-mmlab/mmaction2/pull/89')
def _get_train_clips(self, num_frames):
"""Get clip offsets in train mode.
It will calculate the average interval for selected frames,
and randomly shift them within offsets between [0, avg_interval].
If the total number of frames is smaller than clips num or origin
frames length, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in train mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) // self.num_clips
if avg_interval > 0:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = base_offsets + np.random.randint(
avg_interval, size=self.num_clips)
elif num_frames > max(self.num_clips, ori_clip_len):
clip_offsets = np.sort(
np.random.randint(
num_frames - ori_clip_len + 1, size=self.num_clips))
elif avg_interval == 0:
ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips
clip_offsets = np.around(np.arange(self.num_clips) * ratio)
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _get_test_clips(self, num_frames):
"""Get clip offsets in test mode.
Calculate the average interval for selected frames, and shift them
fixedly by avg_interval/2. If set twice_sample True, it will sample
frames together without fixed shift. If the total number of frames is
not enough, it will return all zero indices.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices in test mode.
"""
ori_clip_len = self.clip_len * self.frame_interval
avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips)
if num_frames > ori_clip_len - 1:
base_offsets = np.arange(self.num_clips) * avg_interval
clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int)
if self.twice_sample:
clip_offsets = np.concatenate([clip_offsets, base_offsets])
else:
clip_offsets = np.zeros((self.num_clips, ), dtype=np.int)
return clip_offsets
def _sample_clips(self, num_frames):
"""Choose clip offsets for the video in a given mode.
Args:
num_frames (int): Total number of frame in the video.
Returns:
np.ndarray: Sampled frame indices.
"""
if self.test_mode:
clip_offsets = self._get_test_clips(num_frames)
else:
if self.multiview == 1:
clip_offsets = self._get_train_clips(num_frames)
else:
clip_offsets = np.concatenate([self._get_train_clips(num_frames) for _ in range(self.multiview)])
return clip_offsets
def get_seq_frames(self, num_frames):
"""
Modified from https://github.com/facebookresearch/SlowFast/blob/64abcc90ccfdcbb11cf91d6e525bed60e92a8796/slowfast/datasets/ssv2.py#L159
Given the video index, return the list of sampled frame indexes.
Args:
num_frames (int): Total number of frame in the video.
Returns:
seq (list): the indexes of frames of sampled from the video.
"""
seg_size = float(num_frames - 1) / self.clip_len
seq = []
for i in range(self.clip_len):
start = int(np.round(seg_size * i))
end = int(np.round(seg_size * (i + 1)))
if not self.test_mode:
seq.append(random.randint(start, end))
else:
seq.append((start + end) // 2)
return np.array(seq)
def __call__(self, results):
"""Perform the SampleFrames loading.
Args:
results (dict): The resulting dict to be modified and passed
to the next transform in pipeline.
"""
total_frames = results['total_frames']
if self.frame_uniform: # sthv2 sampling strategy
assert results['start_index'] == 0
frame_inds = self.get_seq_frames(total_frames)
else:
clip_offsets = self._sample_clips(total_frames)
frame_inds = clip_offsets[:, None] + np.arange(
self.clip_len)[None, :] * self.frame_interval
frame_inds = np.concatenate(frame_inds)
if self.temporal_jitter:
perframe_offsets = np.random.randint(
self.frame_interval, size=len(frame_inds))
frame_inds += perframe_offsets
frame_inds = frame_inds.reshape((-1, self.clip_len))
if self.out_of_bound_opt == 'loop':
frame_inds = np.mod(frame_inds, total_frames)
elif self.out_of_bound_opt == 'repeat_last':
safe_inds = frame_inds < total_frames
unsafe_inds = 1 - safe_inds
last_ind = np.max(safe_inds * frame_inds, axis=1)
new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T)
frame_inds = new_inds
else:
raise ValueError('Illegal out_of_bound option.')
start_index = results['start_index']
frame_inds = np.concatenate(frame_inds) + start_index
results['frame_inds'] = frame_inds.astype(np.int)
results['clip_len'] = self.clip_len
results['frame_interval'] = self.frame_interval
results['num_clips'] = self.num_clips
return results
def __repr__(self):
repr_str = (f'{self.__class__.__name__}('
f'clip_len={self.clip_len}, '
f'frame_interval={self.frame_interval}, '
f'num_clips={self.num_clips}, '
f'temporal_jitter={self.temporal_jitter}, '
f'twice_sample={self.twice_sample}, '
f'out_of_bound_opt={self.out_of_bound_opt}, '
f'test_mode={self.test_mode})')
return repr_str
@PIPELINES.register_module()
class FormatShape:
    """Format final imgs shape to the given input_format.

    Required keys are "imgs", "num_clips" and "clip_len", added or modified
    keys are "imgs" and "input_shape".

    Args:
        input_format (str): Define the final imgs format. One of 'NCTHW',
            'NCHW', 'NCHW_Flow' or 'NPTCHW'.
        collapse (bool): To collapse input_format N... to ... (NCTHW to CTHW,
            etc.) if N is 1. Should be set as True when training and testing
            detectors. Default: False.
    """
    def __init__(self, input_format, collapse=False):
        self.input_format = input_format
        self.collapse = collapse
        # Fail early on unsupported layouts.
        if self.input_format not in ['NCTHW', 'NCHW', 'NCHW_Flow', 'NPTCHW']:
            raise ValueError(
                f'The input format {self.input_format} is invalid.')
    def __call__(self, results):
        """Performs the FormatShape formatting.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        if not isinstance(results['imgs'], np.ndarray):
            results['imgs'] = np.array(results['imgs'])
        imgs = results['imgs']
        # [M x H x W x C]
        # M = 1 * N_crops * N_clips * L
        if self.collapse:
            assert results['num_clips'] == 1
        if self.input_format == 'NCTHW':
            num_clips = results['num_clips']
            clip_len = results['clip_len']
            imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
            # N_crops x N_clips x L x H x W x C
            imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4))
            # N_crops x N_clips x C x L x H x W
            imgs = imgs.reshape((-1, ) + imgs.shape[2:])
            # M' x C x L x H x W
            # M' = N_crops x N_clips
        elif self.input_format == 'NCHW':
            imgs = np.transpose(imgs, (0, 3, 1, 2))
            # M x C x H x W
        elif self.input_format == 'NCHW_Flow':
            num_clips = results['num_clips']
            clip_len = results['clip_len']
            imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:])
            # N_crops x N_clips x L x H x W x C
            imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4))
            # N_crops x N_clips x L x C x H x W
            imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) +
                                imgs.shape[4:])
            # M' x C' x H x W
            # M' = N_crops x N_clips
            # C' = L x C
        elif self.input_format == 'NPTCHW':
            num_proposals = results['num_proposals']
            num_clips = results['num_clips']
            clip_len = results['clip_len']
            imgs = imgs.reshape((num_proposals, num_clips * clip_len) +
                                imgs.shape[1:])
            # P x M x H x W x C
            # M = N_clips x L
            imgs = np.transpose(imgs, (0, 1, 4, 2, 3))
            # P x M x C x H x W
        if self.collapse:
            # Drop the leading singleton batch-of-views dimension.
            assert imgs.shape[0] == 1
            imgs = imgs.squeeze(0)
        results['imgs'] = imgs
        results['input_shape'] = imgs.shape
        return results
    def __repr__(self):
        # NOTE(review): ``collapse`` is not included in the repr — confirm
        # whether that is intentional.
        repr_str = self.__class__.__name__
        repr_str += f"(input_format='{self.input_format}')"
        return repr_str
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This keeps the items in ``keys`` as it is, and collect items in
    ``meta_keys`` into a meta item called ``meta_name``. This is usually
    the last stage of the data loader pipeline.
    For example, when keys='imgs', meta_keys=('filename', 'label',
    'original_shape'), meta_name='img_metas', the results will be a dict with
    keys 'imgs' and 'img_metas', where 'img_metas' is a DataContainer of
    another dict with keys 'filename', 'label', 'original_shape'.

    Args:
        keys (Sequence[str]): Required keys to be collected.
        meta_name (str): The name of the key that contains meta information.
            This key is always populated. Default: "img_metas".
        meta_keys (Sequence[str]): Keys that are collected under meta_name.
            The contents of the ``meta_name`` dictionary depends on
            ``meta_keys``.
            By default this includes:
            - "filename": path to the image file
            - "label": label of the image file
            - "original_shape": original shape of the image as a tuple
                (h, w, c)
            - "img_shape": shape of the image input to the network as a tuple
                (h, w, c). Note that images may be zero padded on the
                bottom/right, if the batch tensor is larger than this shape.
            - "pad_shape": image shape after padding
            - "flip_direction": a str in ("horizontal", "vertical") to
                indicate if the image is flipped horizontally or vertically.
            - "img_norm_cfg": a dict of normalization information:
                - mean - per channel mean subtraction
                - std - per channel std divisor
                - to_rgb - bool indicating if bgr was converted to rgb
        nested (bool): If set as True, will apply data[x] = [data[x]] to all
            items in data. The arg is added for compatibility. Default: False.
    """
    def __init__(self,
                 keys,
                 meta_keys=('filename', 'label', 'original_shape', 'img_shape',
                            'pad_shape', 'flip_direction', 'img_norm_cfg'),
                 meta_name='img_metas',
                 nested=False):
        self.keys = keys
        self.meta_keys = meta_keys
        self.meta_name = meta_name
        self.nested = nested
    def __call__(self, results):
        """Performs the Collect formatting.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        data = {}
        for key in self.keys:
            data[key] = results[key]
        if len(self.meta_keys) != 0:
            # Gather meta entries into a cpu-only DataContainer so the
            # collate step does not try to stack them as tensors.
            meta = {}
            for key in self.meta_keys:
                meta[key] = results[key]
            data[self.meta_name] = DC(meta, cpu_only=True)
        if self.nested:
            # Compatibility mode: wrap every collected item in a list.
            for k in data:
                data[k] = [data[k]]
        return data
    def __repr__(self):
        return (f'{self.__class__.__name__}('
                f'keys={self.keys}, meta_keys={self.meta_keys}, '
                f'nested={self.nested})')
def to_tensor(data):
    """Convert ``data`` to a :obj:`torch.Tensor`.

    Supported types are: :class:`torch.Tensor` (returned unchanged),
    :class:`numpy.ndarray`, non-string :class:`Sequence`, :class:`int` and
    :class:`float`; any other type raises ``TypeError``.
    """
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
    """Cast selected entries of the results dict to ``torch.Tensor``.

    Args:
        keys (Sequence[str]): Names of the entries to convert.
    """
    def __init__(self, keys):
        self.keys = keys
    def __call__(self, results):
        """Convert every configured key of ``results`` in place and return it.

        Args:
            results (dict): The resulting dict to be modified and passed
                to the next transform in pipeline.
        """
        for name in self.keys:
            results[name] = to_tensor(results[name])
        return results
    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class RandAugment:
    """RandAugment pipeline transform operating on ``results['imgs']``.

    Args:
        auto_augment (str): RandAugment config string, e.g. 'rand-m9-n2'
            (parsed by :func:`rand_augment_transform`).
        input_size (int | tuple): Spatial size used to scale the
            translation magnitude.
        interpolation (str): PIL interpolation name, or 'random'.
        level (str): 'video' applies one sampled op set to all frames;
            'image' re-samples ops per frame.
    """
    def __init__(self, auto_augment, input_size=224, interpolation='bicubic', level='video'):
        if isinstance(input_size, tuple):
            img_size = input_size[-2:]
        else:
            img_size = input_size
        # BUG FIX: ``aa_params`` used to be assigned only inside the
        # ``if auto_augment`` branch, so a falsy ``auto_augment`` crashed
        # with UnboundLocalError below. Default it to an empty dict.
        aa_params = {}
        if auto_augment:
            assert isinstance(auto_augment, str)
            if isinstance(img_size, tuple):
                img_size_min = min(img_size)
            else:
                img_size_min = img_size
            aa_params = {"translate_const": int(img_size_min * 0.45)}
            if interpolation and interpolation != "random":
                aa_params["interpolation"] = _pil_interp(interpolation)
        self.auto_augment = auto_augment
        self.aa_params = aa_params
        self.level = level
    def do_ops(self, ops, buf):
        """Apply the given ops to one PIL image in sequence."""
        for op in ops:
            buf = op(buf)
        return buf
    def get_ops(self, ra_ops, num_layers, choice_weights):
        """Sample ``num_layers`` ops (without replacement when weighted)."""
        return np.random.choice(
            ra_ops,
            num_layers,
            replace=choice_weights is None,
            p=choice_weights,
        )
    def __call__(self, results):
        """Augment ``results['imgs']`` (list of RGB uint arrays) in place."""
        if self.auto_augment.startswith("rand"):
            ra_ops, num_layers, choice_weights = rand_augment_transform(self.auto_augment, self.aa_params)
        assert results['modality'] == 'RGB', 'Imgaug only support RGB images.'
        in_type = results['imgs'][0].dtype.type
        if self.level == 'video':
            # One op set shared by every frame keeps the clip consistent.
            ops = self.get_ops(ra_ops, num_layers, choice_weights)
            buffer = [
                transforms.ToPILImage()(frame) for frame in results['imgs']
            ]
            results['imgs'] = [
                np.asarray(self.do_ops(ops, buf)) for buf in buffer
            ]
        elif self.level == 'image':
            # Independent op set per frame.
            buffer = [
                transforms.ToPILImage()(frame) for frame in results['imgs']
            ]
            results['imgs'] = []
            for buf in buffer:
                ops = self.get_ops(ra_ops, num_layers, choice_weights)
                buf = self.do_ops(ops, buf)
                results['imgs'].append(np.asarray(buf))
        else:
            assert False, 'Unknown RandAugment config section'
        img_h, img_w, _ = results['imgs'][0].shape
        out_type = results['imgs'][0].dtype.type
        assert in_type == out_type, \
            ('Imgaug input dtype and output dtype are not the same. ',
             f'Convert from {in_type} to {out_type}')
        results['img_shape'] = (img_h, img_w)
        return results
    def __repr__(self):
        # BUG FIX: the old repr referenced the nonexistent ``self.aug`` and
        # raised AttributeError; report the config string instead.
        repr_str = self.__class__.__name__ + f'(auto_augment={self.auto_augment})'
        return repr_str
| 90,339 | 37.344652 | 143 | py |
ILA | ILA-master/datasets/rand_augment.py | """
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
pulished under an Apache License 2.0.
COMMENT FROM ORIGINAL:
AutoAugment, RandAugment, and AugMix for PyTorch
This code implements the searched ImageNet policies with various tweaks and
improvements and does not include any of the search code. AA and RA
Implementation adapted from:
https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py
AugMix adapted from:
https://github.com/google-research/augmix
Papers:
AutoAugment: Learning Augmentation Policies from Data
https://arxiv.org/abs/1805.09501
Learning Data Augmentation Strategies for Object Detection
https://arxiv.org/abs/1906.11172
RandAugment: Practical automated data augmentation...
https://arxiv.org/abs/1909.13719
AugMix: A Simple Data Processing Method to Improve Robustness and
Uncertainty https://arxiv.org/abs/1912.02781
Hacked together by / Copyright 2020 Ross Wightman
"""
import math
import numpy as np
import random
import re
import PIL
from PIL import Image, ImageEnhance, ImageOps
# (major, minor) of the installed Pillow; used below to select compatible
# transform APIs.
_PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]])
# Default fill color (mid gray) for pixels exposed by geometric transforms.
_FILL = (128, 128, 128)
# This signifies the max integer that the controller RNN could predict for the
# augmentation scheme.
_MAX_LEVEL = 10.0
# Default hyper-parameters passed to the level-to-arg converters.
_HPARAMS_DEFAULT = {
    "translate_const": 250,
    "img_mean": _FILL,
}
# Interpolation modes sampled from when "random" interpolation is requested.
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
def _interpolation(kwargs):
    """Pop 'resample' from kwargs; pick randomly if a list/tuple was given."""
    interpolation = kwargs.pop("resample", Image.BILINEAR)
    if isinstance(interpolation, (list, tuple)):
        return random.choice(interpolation)
    else:
        return interpolation
def _check_args_tf(kwargs):
    """Normalize transform kwargs in place for the installed Pillow version.

    Drops 'fillcolor' on Pillow < 5.0 (unsupported there) and resolves
    'resample' to a single mode.
    """
    if "fillcolor" in kwargs and _PIL_VER < (5, 0):
        kwargs.pop("fillcolor")
    kwargs["resample"] = _interpolation(kwargs)
def shear_x(img, factor, **kwargs):
    """Shear the image horizontally by ``factor`` via an affine transform."""
    _check_args_tf(kwargs)
    return img.transform(
        img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs
    )
def shear_y(img, factor, **kwargs):
    """Shear the image vertically by ``factor`` via an affine transform."""
    _check_args_tf(kwargs)
    return img.transform(
        img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs
    )
def translate_x_rel(img, pct, **kwargs):
    """Translate horizontally by a fraction ``pct`` of the image width."""
    pixels = pct * img.size[0]
    _check_args_tf(kwargs)
    return img.transform(
        img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
    )
def translate_y_rel(img, pct, **kwargs):
    """Translate vertically by a fraction ``pct`` of the image height."""
    pixels = pct * img.size[1]
    _check_args_tf(kwargs)
    return img.transform(
        img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
    )
def translate_x_abs(img, pixels, **kwargs):
    """Translate horizontally by an absolute number of ``pixels``."""
    _check_args_tf(kwargs)
    return img.transform(
        img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs
    )
def translate_y_abs(img, pixels, **kwargs):
    """Translate vertically by an absolute number of ``pixels``."""
    _check_args_tf(kwargs)
    return img.transform(
        img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs
    )
def rotate(img, degrees, **kwargs):
    """Rotate the image by ``degrees``, using the best API for the Pillow
    version: native kwargs on >= 5.2, a manual affine matrix on 5.0-5.1,
    and plain ``rotate`` otherwise.
    """
    _check_args_tf(kwargs)
    if _PIL_VER >= (5, 2):
        return img.rotate(degrees, **kwargs)
    elif _PIL_VER >= (5, 0):
        # Pillow 5.0/5.1: build the rotation matrix around the image centre
        # by hand and apply it as an affine transform.
        w, h = img.size
        post_trans = (0, 0)
        rotn_center = (w / 2.0, h / 2.0)
        angle = -math.radians(degrees)
        matrix = [
            round(math.cos(angle), 15),
            round(math.sin(angle), 15),
            0.0,
            round(-math.sin(angle), 15),
            round(math.cos(angle), 15),
            0.0,
        ]
        def transform(x, y, matrix):
            (a, b, c, d, e, f) = matrix
            return a * x + b * y + c, d * x + e * y + f
        # Shift so rotation happens about the image centre.
        matrix[2], matrix[5] = transform(
            -rotn_center[0] - post_trans[0],
            -rotn_center[1] - post_trans[1],
            matrix,
        )
        matrix[2] += rotn_center[0]
        matrix[5] += rotn_center[1]
        return img.transform(img.size, Image.AFFINE, matrix, **kwargs)
    else:
        return img.rotate(degrees, resample=kwargs["resample"])
def auto_contrast(img, **__):
    """Maximize image contrast (no magnitude argument)."""
    return ImageOps.autocontrast(img)
def invert(img, **__):
    """Invert all pixel values (no magnitude argument)."""
    return ImageOps.invert(img)
def equalize(img, **__):
    """Equalize the image histogram (no magnitude argument)."""
    return ImageOps.equalize(img)
def solarize(img, thresh, **__):
    """Invert all pixels above ``thresh``."""
    return ImageOps.solarize(img, thresh)
def solarize_add(img, add, thresh=128, **__):
    """Add ``add`` (clamped at 255) to pixels below ``thresh`` via a LUT.

    Only 'L' and 'RGB' images are modified; other modes pass through.
    """
    lut = []
    for i in range(256):
        if i < thresh:
            lut.append(min(255, i + add))
        else:
            lut.append(i)
    if img.mode in ("L", "RGB"):
        if img.mode == "RGB" and len(lut) == 256:
            # Replicate the single-channel LUT for the three RGB channels.
            lut = lut + lut + lut
        return img.point(lut)
    else:
        return img
def posterize(img, bits_to_keep, **__):
    """Keep only the ``bits_to_keep`` most significant bits per channel."""
    if bits_to_keep >= 8:
        return img
    return ImageOps.posterize(img, bits_to_keep)
def contrast(img, factor, **__):
    """Scale contrast by ``factor`` (1.0 = unchanged)."""
    return ImageEnhance.Contrast(img).enhance(factor)
def color(img, factor, **__):
    """Scale color saturation by ``factor`` (1.0 = unchanged)."""
    return ImageEnhance.Color(img).enhance(factor)
def brightness(img, factor, **__):
    """Scale brightness by ``factor`` (1.0 = unchanged)."""
    return ImageEnhance.Brightness(img).enhance(factor)
def sharpness(img, factor, **__):
    """Scale sharpness by ``factor`` (1.0 = unchanged)."""
    return ImageEnhance.Sharpness(img).enhance(factor)
def _randomly_negate(v):
    """With 50% prob, negate the value"""
    return -v if random.random() > 0.5 else v
def _rotate_level_to_arg(level, _hparams):
    # range [-30, 30]
    level = (level / _MAX_LEVEL) * 30.0
    level = _randomly_negate(level)
    return (level,)
def _enhance_level_to_arg(level, _hparams):
    # range [0.1, 1.9]
    return ((level / _MAX_LEVEL) * 1.8 + 0.1,)
def _enhance_increasing_level_to_arg(level, _hparams):
    # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend
    # range [0.1, 1.9]
    level = (level / _MAX_LEVEL) * 0.9
    level = 1.0 + _randomly_negate(level)
    return (level,)
def _shear_level_to_arg(level, _hparams):
    # range [-0.3, 0.3]
    level = (level / _MAX_LEVEL) * 0.3
    level = _randomly_negate(level)
    return (level,)
def _translate_abs_level_to_arg(level, hparams):
    # Scales the level by the configured absolute pixel budget, sign random.
    translate_const = hparams["translate_const"]
    level = (level / _MAX_LEVEL) * float(translate_const)
    level = _randomly_negate(level)
    return (level,)
def _translate_rel_level_to_arg(level, hparams):
    # default range [-0.45, 0.45]
    translate_pct = hparams.get("translate_pct", 0.45)
    level = (level / _MAX_LEVEL) * translate_pct
    level = _randomly_negate(level)
    return (level,)
def _posterize_level_to_arg(level, _hparams):
    # As per Tensorflow TPU EfficientNet impl
    # range [0, 4], 'keep 0 up to 4 MSB of original image'
    # intensity/severity of augmentation decreases with level
    return (int((level / _MAX_LEVEL) * 4),)
def _posterize_increasing_level_to_arg(level, hparams):
    # As per Tensorflow models research and UDA impl
    # range [4, 0], 'keep 4 down to 0 MSB of original image',
    # intensity/severity of augmentation increases with level
    return (4 - _posterize_level_to_arg(level, hparams)[0],)
def _posterize_original_level_to_arg(level, _hparams):
    # As per original AutoAugment paper description
    # range [4, 8], 'keep 4 up to 8 MSB of image'
    # intensity/severity of augmentation decreases with level
    return (int((level / _MAX_LEVEL) * 4) + 4,)
def _solarize_level_to_arg(level, _hparams):
    # range [0, 256]
    # intensity/severity of augmentation decreases with level
    return (int((level / _MAX_LEVEL) * 256),)
def _solarize_increasing_level_to_arg(level, _hparams):
    # range [0, 256]
    # intensity/severity of augmentation increases with level
    return (256 - _solarize_level_to_arg(level, _hparams)[0],)
def _solarize_add_level_to_arg(level, _hparams):
    # range [0, 110]
    return (int((level / _MAX_LEVEL) * 110),)
# Maps op name -> converter turning a 0-10 magnitude level into op arguments.
# ``None`` means the op takes no magnitude argument.
LEVEL_TO_ARG = {
    "AutoContrast": None,
    "Equalize": None,
    "Invert": None,
    "Rotate": _rotate_level_to_arg,
    # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers
    "Posterize": _posterize_level_to_arg,
    "PosterizeIncreasing": _posterize_increasing_level_to_arg,
    "PosterizeOriginal": _posterize_original_level_to_arg,
    "Solarize": _solarize_level_to_arg,
    "SolarizeIncreasing": _solarize_increasing_level_to_arg,
    "SolarizeAdd": _solarize_add_level_to_arg,
    "Color": _enhance_level_to_arg,
    "ColorIncreasing": _enhance_increasing_level_to_arg,
    "Contrast": _enhance_level_to_arg,
    "ContrastIncreasing": _enhance_increasing_level_to_arg,
    "Brightness": _enhance_level_to_arg,
    "BrightnessIncreasing": _enhance_increasing_level_to_arg,
    "Sharpness": _enhance_level_to_arg,
    "SharpnessIncreasing": _enhance_increasing_level_to_arg,
    "ShearX": _shear_level_to_arg,
    "ShearY": _shear_level_to_arg,
    "TranslateX": _translate_abs_level_to_arg,
    "TranslateY": _translate_abs_level_to_arg,
    "TranslateXRel": _translate_rel_level_to_arg,
    "TranslateYRel": _translate_rel_level_to_arg,
}
# Maps op name -> the PIL-based implementation defined above.
NAME_TO_OP = {
    "AutoContrast": auto_contrast,
    "Equalize": equalize,
    "Invert": invert,
    "Rotate": rotate,
    "Posterize": posterize,
    "PosterizeIncreasing": posterize,
    "PosterizeOriginal": posterize,
    "Solarize": solarize,
    "SolarizeIncreasing": solarize,
    "SolarizeAdd": solarize_add,
    "Color": color,
    "ColorIncreasing": color,
    "Contrast": contrast,
    "ContrastIncreasing": contrast,
    "Brightness": brightness,
    "BrightnessIncreasing": brightness,
    "Sharpness": sharpness,
    "SharpnessIncreasing": sharpness,
    "ShearX": shear_x,
    "ShearY": shear_y,
    "TranslateX": translate_x_abs,
    "TranslateY": translate_y_abs,
    "TranslateXRel": translate_x_rel,
    "TranslateYRel": translate_y_rel,
}
class AugmentOp:
    """
    Apply for video.

    A single augmentation op: wraps one PIL transform together with its
    application probability, magnitude and fill/interpolation kwargs.
    When called with a list of frames, the magnitude (and its level args)
    is drawn once and the same transform is applied to every frame.
    """
    def __init__(self, name, prob=0.5, magnitude=10, hparams=None):
        hparams = hparams or _HPARAMS_DEFAULT
        self.aug_fn = NAME_TO_OP[name]
        self.level_fn = LEVEL_TO_ARG[name]
        self.prob = prob
        self.magnitude = magnitude
        self.hparams = hparams.copy()
        # Fill color / interpolation for geometric ops; fall back to the
        # module defaults when not configured.
        self.kwargs = {
            "fillcolor": hparams["img_mean"]
            if "img_mean" in hparams
            else _FILL,
            "resample": hparams["interpolation"]
            if "interpolation" in hparams
            else _RANDOM_INTERPOLATION,
        }
        # If magnitude_std is > 0, we introduce some randomness
        # in the usually fixed policy and sample magnitude from a normal distribution
        # with mean `magnitude` and std-dev of `magnitude_std`.
        # NOTE This is my own hack, being tested, not in papers or reference impls.
        self.magnitude_std = self.hparams.get("magnitude_std", 0)
    def __call__(self, img_list):
        # Skip the op entirely with probability (1 - prob).
        if self.prob < 1.0 and random.random() > self.prob:
            return img_list
        magnitude = self.magnitude
        if self.magnitude_std and self.magnitude_std > 0:
            magnitude = random.gauss(magnitude, self.magnitude_std)
        magnitude = min(_MAX_LEVEL, max(0, magnitude))  # clip to valid range
        level_args = (
            self.level_fn(magnitude, self.hparams)
            if self.level_fn is not None
            else ()
        )
        if isinstance(img_list, list):
            # Same drawn parameters for every frame of the clip.
            return [
                self.aug_fn(img, *level_args, **self.kwargs) for img in img_list
            ]
        else:
            return self.aug_fn(img_list, *level_args, **self.kwargs)
# Default RandAugment op pool.
_RAND_TRANSFORMS = [
    "AutoContrast",
    "Equalize",
    "Invert",
    "Rotate",
    "Posterize",
    "Solarize",
    "SolarizeAdd",
    "Color",
    "Contrast",
    "Brightness",
    "Sharpness",
    "ShearX",
    "ShearY",
    "TranslateXRel",
    "TranslateYRel",
]
# Op pool whose severity increases monotonically with magnitude
# (selected via the 'inc' config flag).
_RAND_INCREASING_TRANSFORMS = [
    "AutoContrast",
    "Equalize",
    "Invert",
    "Rotate",
    "PosterizeIncreasing",
    "SolarizeIncreasing",
    "SolarizeAdd",
    "ColorIncreasing",
    "ContrastIncreasing",
    "BrightnessIncreasing",
    "SharpnessIncreasing",
    "ShearX",
    "ShearY",
    "TranslateXRel",
    "TranslateYRel",
]
# These experimental weights are based loosely on the relative improvements mentioned in paper.
# They may not result in increased performance, but could likely be tuned to so.
_RAND_CHOICE_WEIGHTS_0 = {
    "Rotate": 0.3,
    "ShearX": 0.2,
    "ShearY": 0.2,
    "TranslateXRel": 0.1,
    "TranslateYRel": 0.1,
    "Color": 0.025,
    "Sharpness": 0.025,
    "AutoContrast": 0.025,
    "Solarize": 0.005,
    "SolarizeAdd": 0.005,
    "Contrast": 0.005,
    "Brightness": 0.005,
    "Equalize": 0.005,
    "Posterize": 0,
    "Invert": 0,
}
def _select_rand_weights(weight_idx=0, transforms=None):
    """Return normalized op-sampling probabilities for ``transforms``."""
    transforms = transforms or _RAND_TRANSFORMS
    assert weight_idx == 0  # only one set of weights currently
    weights = np.array([_RAND_CHOICE_WEIGHTS_0[name] for name in transforms],
                       dtype=np.float64)
    return weights / weights.sum()
def rand_augment_ops(magnitude=10, hparams=None, transforms=None):
    """Build one 0.5-probability :class:`AugmentOp` per transform name."""
    hparams = hparams or _HPARAMS_DEFAULT
    names = transforms or _RAND_TRANSFORMS
    ops = []
    for name in names:
        ops.append(
            AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams))
    return ops
class RandAugment:
    """Apply ``num_layers`` randomly chosen ops from ``ops`` to an image."""

    def __init__(self, ops, num_layers=2, choice_weights=None):
        self.ops = ops
        self.num_layers = num_layers
        self.choice_weights = choice_weights

    def __call__(self, img):
        # Weighted selection samples without replacement; uniform selection
        # samples with replacement.
        chosen = np.random.choice(
            self.ops,
            self.num_layers,
            replace=self.choice_weights is None,
            p=self.choice_weights,
        )
        for op in chosen:
            img = op(img)
        return img
def rand_augment_transform(config_str, hparams):
    """
    RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719
    Create a RandAugment transform
    :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by
    dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining
    sections, not order specific, determine
        'm' - integer magnitude of rand augment
        'n' - integer num layers (number of transform ops selected per image)
        'w' - integer probability weight index (index of a set of weights to influence choice of op)
        'mstd' - float std deviation of magnitude noise applied
        'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0)
    Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5
    'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2
    :param hparams: Other hparams (kwargs) for the RandAugmentation scheme
    :return: tuple of (ops, num_layers, choice_weights)
    """
    magnitude = _MAX_LEVEL  # default to _MAX_LEVEL for magnitude (currently 10)
    num_layers = 2  # default to 2 ops per image
    weight_idx = None  # default to no probability weights for op choice
    transforms = _RAND_TRANSFORMS
    config = config_str.split("-")
    assert config[0] == "rand"
    config = config[1:]
    for c in config:
        # Split each section like 'm9' into key 'm' and value '9'.
        cs = re.split(r"(\d.*)", c)
        if len(cs) < 2:
            continue
        key, val = cs[:2]
        if key == "mstd":
            # noise param injected via hparams for now
            hparams.setdefault("magnitude_std", float(val))
        elif key == "inc":
            # BUG FIX: ``bool(val)`` treated any non-empty string (including
            # '0') as truthy; parse the integer so 'inc0' disables it.
            if int(val):
                transforms = _RAND_INCREASING_TRANSFORMS
        elif key == "m":
            magnitude = int(val)
        elif key == "n":
            num_layers = int(val)
        elif key == "w":
            weight_idx = int(val)
        else:
            # BUG FIX: ``assert NotImplementedError`` asserted on a truthy
            # class object and could never fire; raise for unknown keys.
            raise NotImplementedError(
                f'Unknown RandAugment config section: {c}')
    ra_ops = rand_augment_ops(
        magnitude=magnitude, hparams=hparams, transforms=transforms
    )
    choice_weights = (
        None if weight_idx is None else _select_rand_weights(weight_idx)
    )
    return ra_ops, num_layers, choice_weights
    # return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
| 16,174 | 29.347092 | 119 | py |
ILA | ILA-master/models/align.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from typing import Optional
import numpy as np
def aligned_mask_generation(point, resolution):
    """Build a 2-D spatial weight mask around each predicted point.

    ``point`` has shape ``(..., 2)`` with (x, y) coordinates in the
    normalized [-1, 1] grid; the result has shape ``(..., resolution,
    resolution)``. Each mask is separable: per-axis weight is 1.0 within
    +/-0.25 of the point and decays linearly (slope 1.5) to 0 outside.
    """
    L = resolution
    shape = point.size()[:-1]
    point = point.reshape(-1, 1, 2)
    N = point.size()[0]
    # Normalized grid coordinates in [-1, 1], broadcast over both axes.
    element = torch.arange(0, L).to(point.device) / (L - 1) * 2 - 1
    element = element.reshape(1, L, 1).expand(N, -1, 2)
    element = element - point
    # Flat plateau of weight 1 within 0.25 of the point on each axis.
    first_condition = (-0.25 <= element) * (element <= 0.25) * 1.0
    # Linear falloff outside the plateau, clipped at zero.
    second_condition = 1 - (element.abs() - 0.25) * 1.5
    second_condition = (1 - first_condition) * second_condition * (second_condition >= 0)
    second_condition = torch.nn.functional.relu(second_condition)
    aligned_mask_xy = first_condition + second_condition
    aligned_mask_x, aligned_mask_y = aligned_mask_xy[..., 0], aligned_mask_xy[..., 1]
    # Outer product of the per-axis weights gives the 2-D mask.
    mask = aligned_mask_y.unsqueeze(-1) * aligned_mask_x.unsqueeze(-2)
    mask = mask.reshape(*shape, resolution, resolution)
    return mask
class SqueezeAndRerange(torch.nn.Module):
    """Squeeze the given dims, then reshape ``(b*t, c)`` to ``(b, c, t)``.

    Args:
        *dim: Dimensions to squeeze; must be all non-negative or all
            negative so the squeeze order is well defined.
        T: Number of frames plus one; the module operates on ``T - 1``
            frames per sample.
    """
    def __init__(self, *dim, T):
        super().__init__()
        self.T = T - 1
        if all(v >= 0 for v in dim):
            # Squeeze from the highest index first so earlier squeezes do
            # not shift the remaining positive indices.
            self.dim = sorted(dim, reverse=True)
        elif all(v < 0 for v in dim):
            self.dim = sorted(dim)
        else:
            # BUG FIX: mixed-sign dims used to leave ``self.dim`` unset,
            # deferring an AttributeError to ``forward``; fail fast instead.
            raise ValueError(
                'dims must be all non-negative or all negative, '
                f'got {dim}')
    def forward(self, x):
        for d in self.dim:
            x = torch.squeeze(x, dim=d)
        bt, d = x.size()
        x = rearrange(x, "(b t) c -> b c t", b=bt // self.T, t=self.T, c=d)
        return x
class Depth_Separable_Convolution(nn.Module):
    """Depthwise-separable convolution: per-channel 3x3 conv followed by a
    1x1 pointwise conv, each with BatchNorm and ReLU."""

    def __init__(self, in_chs, out_chs):
        super(Depth_Separable_Convolution, self).__init__()
        # Per-channel 3x3 convolution (groups == in_chs keeps each channel
        # independent), padding preserves spatial size.
        self.depthwise_conv = nn.Sequential(
            nn.Conv2d(in_channels=in_chs, out_channels=in_chs,
                      kernel_size=(3, 3), stride=(1, 1), groups=in_chs,
                      padding=(1, 1)),
            nn.BatchNorm2d(num_features=in_chs),
            nn.ReLU(),
        )
        # 1x1 convolution mixing information across channels.
        self.pointwise_conv = nn.Sequential(
            nn.Conv2d(in_channels=in_chs, out_channels=out_chs,
                      kernel_size=(1, 1), stride=(1, 1), groups=1,
                      padding=(0, 0)),
            nn.BatchNorm2d(num_features=out_chs),
            nn.ReLU(),
        )

    def forward(self, x):
        return self.pointwise_conv(self.depthwise_conv(x))
class ILA(nn.Module):
    """Implicit alignment module for paired frame features.

    Given per-frame-pair token features ``pre`` and ``cur`` of shape
    ``(B, T-1, 1 + H*W, d_model)`` (cls token first), it predicts one 2-D
    point per frame of each pair, builds spatial masks around those points
    (see ``aligned_mask_generation``) and returns masked/pooled features.
    """
    def __init__(self, T=8, d_model=768, patch_size=16, input_resolution=224, is_training=True):
        super().__init__()
        self.T = T
        # Number of patches along one spatial side.
        self.W = input_resolution // patch_size
        self.d_model = d_model
        self.is_training = is_training
        # Fuses the concatenated (pre, cur) feature pair into a 256-d map.
        self.interactive_block = nn.Sequential(
            nn.Conv2d(self.d_model * 2, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
        )
        # Temporal 3x1x1 convolution over the fused per-frame features.
        self.conv = nn.Conv3d(in_channels=128, out_channels=128, kernel_size=(3, 1, 1), padding=(1, 0, 0))
        self.fc = nn.Linear(128, self.d_model)
        # Regresses 4 values per pair (two 2-D points), Tanh-bounded.
        self.prediction_block = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.MaxPool2d((2, 2)),
            nn.ReLU(),
            nn.Conv2d(128, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.MaxPool2d((2, 2)),
            nn.ReLU(),
            nn.AdaptiveMaxPool2d((1, 1)),
            SqueezeAndRerange(-2, -1, T=self.T),
            nn.Conv1d(128, 64, 1, groups=2),
            nn.ReLU(),
            nn.Conv1d(64, 4, 1, groups=2),
            nn.Tanh()
        )
        # Zero-init the last conv so initial point predictions are (0, 0).
        self.prediction_block[-2].weight.data.zero_()
        self.prediction_block[-2].bias.data.zero_()
        # 9 fixed offsets (centre + 8 neighbours) used to jitter the
        # predicted points during training; scaled by ``delta``.
        delta = 0.2
        self.disturbance_offset = torch.tensor([[0, 0], [0, 1], [1, 0], [0, -1], [-1, 0], [1, 1], [-1, -1], [1, -1], [-1, 1]]).float() * delta
        self.disturbance_offset = self.disturbance_offset.reshape(1, 1, 9, 2)
    def alignment_parameters(self):
        """Yield only the point-prediction parameters (for a separate optimizer group)."""
        yield from self.prediction_block.parameters()
    def align_decay(self):
        """Halve the jitter offsets (call to anneal training disturbance)."""
        with torch.no_grad():
            self.disturbance_offset *= 0.5
    def forward(self, pre, cur, cls_token):
        """Align the (pre, cur) frame pairs.

        Args:
            pre, cur: ``(b, T-1, 1 + H*W, d)`` token features; index 0 of
                the token axis is the cls token.
            cls_token: ``(1, b, T, d)`` cls tokens prepended to the fused
                interactive feature.
        Returns:
            tuple: (pooled aligned pair features, per-frame aligned token
            features, fused interactive feature).
        """
        b, t, l, d = pre.size()
        B = b
        T = self.T-1
        W = self.W
        H = W
        D = d
        # Split off cls tokens; the rest are spatial patch tokens.
        pre_cls_token = pre[:, :, :1, :]
        cur_cls_token = cur[:, :, :1, :]
        pre = pre[:, :, 1:, :]
        cur = cur[:, :, 1:, :]
        pre_projed = rearrange(pre, "b t (h w) d -> b d t h w", b=B, t=T, d=D, h=H, w=W)
        cur_projed = rearrange(cur, "b t (h w) d -> b d t h w", b=B, t=T, d=D, h=H, w=W)
        # Channel-concatenate each pair and fuse.
        pairs = torch.cat([pre_projed, cur_projed], dim=-4)
        pairs = pairs.reshape(-1, D * 2, T, H, W)
        pairs = rearrange(pairs, "b d t h w -> (b t) d h w", b=B, t=T, d=D*2, h=H, w=W)
        interactive_feature = self.interactive_block(pairs)
        # Predict two 2-D points per pair; scale into [-0.75, 0.75].
        points = self.prediction_block(interactive_feature).transpose(1, 2)
        points = points * 0.75
        # Split fused channels back into pre/cur halves and average
        # overlapping neighbours along time.
        pre_interactive_feature = interactive_feature[:, :128, :, :]
        cur_interactive_feature = interactive_feature[:, 128:, :, :]
        pre_interactive_feature = rearrange(pre_interactive_feature, "(b t) d h w -> b d t h w", b=B, t=T, d=128, h=H, w=W)
        cur_interactive_feature = rearrange(cur_interactive_feature, "(b t) d h w -> b d t h w", b=B, t=T, d=128, h=H, w=W)
        first_interactive_feature = pre_interactive_feature[:, :, :1, :, :]
        rest_interactive_feature = (pre_interactive_feature[:, :, 1:, :, :]+cur_interactive_feature[:, :, :-1, :, :])/2
        last_interactive_feature = cur_interactive_feature[:, :, -1:, :, :]
        interactive_feature = torch.cat([first_interactive_feature, rest_interactive_feature, last_interactive_feature], dim=2)
        interactive_feature = self.conv(interactive_feature)
        interactive_feature = rearrange(interactive_feature, "b d t h w -> (h w) b t d", b=B, t=T+1, d=128, h=H, w=W)
        interactive_feature = self.fc(interactive_feature)
        interactive_feature = torch.cat([cls_token, interactive_feature], dim=0)
        interactive_feature = rearrange(interactive_feature, "l b t d -> l (b t) d", b=b, t=self.T, d=self.d_model, l=H*W+1)
        if self.is_training:
            # Training: expand each point into 9 jittered variants.
            point_first_pair = points[:, :, :2]
            point_second_pair = points[:, :, 2:]
            point_first_pair = point_first_pair.unsqueeze(2) + self.disturbance_offset.to(point_first_pair.device)
            point_second_pair = point_second_pair.unsqueeze(2) + self.disturbance_offset.to(point_second_pair.device)
        else:
            point_first_pair = points[:, :, :2]
            point_second_pair = points[:, :, 2:]
            point_first_pair = point_first_pair.unsqueeze(2)
            point_second_pair = point_second_pair.unsqueeze(2)
        # Mask the 'pre' frames around the first predicted point
        # (mean over the jitter variants).
        aligned_mask = aligned_mask_generation(point_first_pair, H)
        aligned_mask = aligned_mask.mean(2)
        aligned_mask = aligned_mask.reshape(B, 1, T, H, W)
        pre_aligned = aligned_mask * pre_projed
        raw_pre_aligned = pre_aligned
        raw_pre_aligned = rearrange(raw_pre_aligned, "b d t h w -> b t (h w) d", b=B, t=T, d=D, h=H, w=W)
        raw_pre_aligned = torch.cat([pre_cls_token, raw_pre_aligned], dim=2)
        # Spatially pool the masked feature to a single token per frame.
        pre_aligned = pre_aligned.sum([-1, -2]).unsqueeze(-1)
        pre_aligned = rearrange(pre_aligned, "b d t l -> l b t d", b=B, t=T, l=1, d=D)
        # Same procedure for the 'cur' frames with the second point.
        aligned_mask = aligned_mask_generation(point_second_pair, H)
        aligned_mask = aligned_mask.mean(2)
        aligned_mask = aligned_mask.reshape(B, 1, T, H, W)
        cur_aligned = aligned_mask * cur_projed
        raw_cur_aligned = cur_aligned
        raw_cur_aligned = rearrange(raw_cur_aligned, "b d t h w -> b t (h w) d", b=B, t=T, d=D, h=H, w=W)
        raw_cur_aligned = torch.cat([cur_cls_token, raw_cur_aligned], dim=2)
        cur_aligned = cur_aligned.sum([-1, -2]).unsqueeze(-1)
        cur_aligned = rearrange(cur_aligned, "b d t l -> l b t d", b=B, t=T, l=1, d=D)
        # Merge the T-1 pairs back into T frames by averaging overlaps.
        aligned_frame_first = raw_pre_aligned[:, :1, :, :]
        aligned_frame_rest = (raw_pre_aligned[:, 1:, :, :]+raw_cur_aligned[:, :-1, :, :])/2
        aligned_frame_last = raw_cur_aligned[:, -1:, :, :]
        aligned_frame = torch.cat([aligned_frame_first, aligned_frame_rest, aligned_frame_last], dim=1)
        pair = torch.cat([pre_aligned, cur_aligned], dim=3)
        return pair, aligned_frame, interactive_feature
if __name__ == '__main__':
    # Smoke test: run ILA on random features shaped (batch=8, 7 frame pairs,
    # 197 tokens, 768 dims) plus cls tokens, and print the output shapes.
    model = ILA()
    x = torch.randn(8, 7, 197, 768)
    cls_token = torch.randn(1, 8, 8, 768)
    out, frame, feature = model(x, x, cls_token)
    print(out.size())
    print(frame.size())
    print(feature.size())
ILA | ILA-master/models/mat.py | from collections import OrderedDict
from typing import Tuple
from einops import rearrange, reduce, repeat
from timm.models.layers import trunc_normal_
import torch
from torch import nn
import numpy as np
from torch.utils.checkpoint import checkpoint_sequential
import sys
from models.align import ILA
from models.metrics import cos_similarity_loss, timewise_cos
sys.path.append("../")
from clip.model import LayerNorm, QuickGELU, DropPath
def shift(x, n_segment=3, n_div=8):
    """Temporally shift a fraction of the channels across adjacent frames.

    ``x`` is (batch*T, tokens, heads, head_dim). The first
    ``(heads*head_dim) // n_div`` channels are shifted one frame left, the
    next chunk one frame right (vacated slots zero-filled), the rest kept.
    Returns a tensor of the same shape.
    """
    bt, tokens, heads, head_dim = x.size()
    batch = bt // n_segment
    channels = heads * head_dim
    fold = channels // n_div
    # (b t) l h d -> b t (h d) l
    y = x.reshape(batch, n_segment, tokens, channels).permute(0, 1, 3, 2)
    shifted = torch.zeros_like(y)
    shifted[:, :-1, :fold] = y[:, 1:, :fold]                      # shift left
    shifted[:, 1:, fold: 2 * fold] = y[:, :-1, fold: 2 * fold]    # shift right
    shifted[:, :, 2 * fold:] = y[:, :, 2 * fold:]                 # not shifted
    # b t (h d) l -> (b t) l h d
    return shifted.permute(0, 1, 3, 2).reshape(bt, tokens, heads, head_dim)
class TemporalCrossAttention(nn.Module):
    """Cross-frame attention producing learned relative-position values.

    Each frame's patch tokens attend to the previous/next frame's tokens;
    the head-averaged attention map then gathers entries from a table of
    relative-position embeddings (``w1`` when attending backward in time,
    ``w2`` when attending forward).
    """

    def __init__(
        self,
        spatial_size: Tuple[int, int] = (14, 14),
        feature_dim: int = 768,
        num_head: int = 12,
        T: int = 8,
    ):
        super().__init__()
        self.spatial_size = spatial_size
        self.num_head = num_head
        self.T = T
        self.query_proj = nn.Linear(feature_dim, feature_dim)
        self.key_proj = nn.Linear(feature_dim, feature_dim)
        # One value table per temporal direction, indexed by the relative
        # (dy, dx) offset between any two patches of the grid.
        w_size = np.prod([x * 2 - 1 for x in spatial_size])
        self.w1 = nn.Parameter(torch.zeros([w_size, feature_dim]))
        self.w2 = nn.Parameter(torch.zeros([w_size, feature_dim]))
        # Precompute, for every (query patch, key patch) pair, the index of
        # their relative offset into the w1/w2 tables.
        idx_tensor = torch.zeros([np.prod(spatial_size) for _ in (0, 1)], dtype=torch.long)
        for q in range(np.prod(spatial_size)):
            qi, qj = q // spatial_size[1], q % spatial_size[1]
            for k in range(np.prod(spatial_size)):
                ki, kj = k // spatial_size[1], k % spatial_size[1]
                i_offs = qi - ki + spatial_size[0] - 1
                j_offs = qj - kj + spatial_size[1] - 1
                idx_tensor[q, k] = i_offs * (spatial_size[1] * 2 - 1) + j_offs
        self.idx_tensor = idx_tensor

    def forward_half(self, q: torch.Tensor, k: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
        """Attention of q over k (cls token dropped) whose weights gather
        values from the relative-position table ``w``; returns (N, T', L, C)."""
        q, k = q[:, :, 1:], k[:, :, 1:] # remove cls token
        assert q.size() == k.size()
        assert q.size(2) == np.prod(self.spatial_size)
        attn = torch.einsum('ntqhd,ntkhd->ntqkh', q / (q.size(-1) ** 0.5), k)
        # Softmax over keys, then average across heads.
        attn = attn.softmax(dim=-2).mean(dim=-1) # L, L, N, T
        self.idx_tensor = self.idx_tensor.to(w.device)
        # Expand the compact offset table to a full (L, L, C) lookup.
        w_unroll = w[self.idx_tensor] # L, L, C
        attn = attn.float()
        w_unroll = w_unroll.float()
        ret = torch.einsum('ntqk,qkc->ntqc', attn, w_unroll)
        return ret

    def forward(self, q: torch.Tensor, k: torch.Tensor):
        """q, k: (N*T, L, D) token sequences (L = patches + cls).

        Returns (N*T, L, D) mixing relative-position values from both
        temporal neighbours; the first/last frame only has one neighbour and
        the cls-token row stays zero.
        """
        NT, L, D = q.size()
        N = NT // self.T
        T = self.T
        assert L == np.prod(self.spatial_size) + 1
        q = self.query_proj(q)
        k = self.key_proj(k)
        q = q.view(NT, L, self.num_head, D // self.num_head)
        k = k.view(NT, L, self.num_head, D // self.num_head)
        q = rearrange(q, "(b t) l h d -> b t l h d", b=NT // self.T, t=self.T, l=L, h=self.num_head, d=D // self.num_head)
        k = rearrange(k, "(b t) l h d -> b t l h d", b=NT // self.T, t=self.T, l=L, h=self.num_head, d=D // self.num_head)
        # NOTE(review): output buffer is hard-coded to device='cuda' — this
        # fails on CPU-only runs; confirm whether q.device was intended.
        ret = torch.zeros([N, T, L, self.w1.size(-1)], device='cuda')
        # Frame t attends to frame t-1 (backward table w1) ...
        ret[:, 1:, 1:, :] += self.forward_half(q[:, 1:, :, :, :], k[:, :-1, :, :, :], self.w1)
        # ... and to frame t+1 (forward table w2).
        ret[:, :-1, 1:, :] += self.forward_half(q[:, :-1, :, :, :], k[:, 1:, :, :, :], self.w2)
        ret = rearrange(ret, "b t l c -> (b t) l c", b=N, t=T, l=L, c=D)
        return ret
class Attention(nn.Module):
    """Multi-head attention with independent q/k/v input and projection sizes.

    Unlike ``nn.MultiheadAttention``, the query/key/value inputs may each come
    from differently sized spaces, and ``return_all_features`` exposes the
    per-head projections and the attention map alongside the output.
    """

    def __init__(
        self, T: int, q_in_dim: int, k_in_dim: int, v_in_dim: int,
        qk_proj_dim: int, v_proj_dim: int, num_heads: int, out_dim: int,
        return_all_features: bool = False,
    ):
        super().__init__()
        assert qk_proj_dim % num_heads == 0 and v_proj_dim % num_heads == 0
        self.T = T
        self.num_heads = num_heads
        self.return_all_features = return_all_features
        # Separate projections so q/k/v may have distinct input widths.
        self.q_proj = nn.Linear(q_in_dim, qk_proj_dim)
        self.k_proj = nn.Linear(k_in_dim, qk_proj_dim)
        self.v_proj = nn.Linear(v_in_dim, v_proj_dim)
        self.out_proj = nn.Linear(v_proj_dim, out_dim)
        self._initialize_weights()

    def _initialize_weights(self):
        """Xavier-initialise all projections and zero their biases."""
        for layer in (self.q_proj, self.k_proj, self.v_proj, self.out_proj):
            nn.init.xavier_uniform_(layer.weight)
            nn.init.constant_(layer.bias, 0.)

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor):
        assert q.ndim == 3 and k.ndim == 3 and v.ndim == 3
        batch = q.size(0)
        assert k.size(0) == batch and v.size(0) == batch
        len_q, len_kv = q.size(1), k.size(1)
        assert v.size(1) == len_kv
        q, k, v = self.q_proj(q), self.k_proj(k), self.v_proj(v)
        heads = self.num_heads
        dim_qk = q.size(-1) // heads
        dim_v = v.size(-1) // heads
        # Split into heads: (N, L, H, C_head).
        q = q.view(batch, len_q, heads, dim_qk)
        k = k.view(batch, len_kv, heads, dim_qk)
        v = v.view(batch, len_kv, heads, dim_v)
        # (A temporal shift of k/v was experimented with here; left disabled.)
        # Scaled dot-product attention, softmax over keys.
        aff = torch.einsum('nqhc,nkhc->nqkh', q / (dim_qk ** 0.5), k)
        aff = aff.softmax(dim=-2)
        mix = torch.einsum('nqlh,nlhc->nqhc', aff, v)
        out = self.out_proj(mix.flatten(-2))
        if self.return_all_features:
            return dict(q=q, k=k, v=v, aff=aff, out=out)
        return out, q, k, v
class MultiAxisAttentionBlock(nn.Module):
    """Transformer block augmented with ILA temporal alignment.

    Before the standard pre-norm self-attention + MLP, adjacent frames are
    aligned by ``ILA``; the aligned frames and the interactive feature are
    added back through zero-initialised skip projections (so the block starts
    out as a plain CLIP block), and a per-block cosine alignment loss is
    appended to ``feature["cosine"]``.
    """

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, droppath=0., T=0, input_resolution=224, patch_size=16, idx=0):
        super().__init__()
        self.T = T
        self.W = input_resolution // patch_size  # patches per side
        self.in_feature_dim = d_model
        self.D_hidden_features = 128
        self.middle_frame_index = self.T // 2
        self.idx = idx
        self.input_resolution = input_resolution
        self.patch_size = patch_size
        self.number_step = input_resolution // patch_size
        # Projection + norm applied before the alignment module.
        self.message_fc = nn.Linear(d_model, d_model)
        self.message_ln = LayerNorm(d_model)
        self.message_attn = ILA(T=self.T, d_model=d_model, patch_size=patch_size,input_resolution=input_resolution)
        # Zero-initialised skips: alignment contributions start at zero.
        self.temporal_skip_fc = nn.Linear(d_model, d_model)
        self.temporal_skip_fc.weight.data.zero_()
        self.temporal_skip_fc.bias.data.zero_()
        self.temporal_skip_align_fc = nn.Linear(d_model, d_model)
        self.temporal_skip_align_fc.weight.data.zero_()
        self.temporal_skip_align_fc.bias.data.zero_()
        self.attn = nn.MultiheadAttention(d_model, n_head, )
        self.ln_1 = LayerNorm(d_model)
        self.drop_path = DropPath(droppath) if droppath > 0. else nn.Identity()
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        # Keep the (optional) mask on the activations' dtype/device.
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
        return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]

    def forward(self, feature):
        """``feature`` is a dict with "input": (L, B*T, D) tokens and
        "cosine": a running list of alignment losses; both are updated."""
        x = feature["input"]
        l, bt, d = x.size()
        B = bt // self.T
        T = self.T
        W = self.W
        H = W
        raw_x = x
        align_x = x.view(l, B, self.T, d)
        align_x = self.message_fc(align_x)
        align_x = self.message_ln(align_x)
        cls_token = align_x[:1, :, :, :]
        align_x = rearrange(align_x, "l b t c -> b t l c", l=l, b=B, t=self.T, c=d)
        # Adjacent-frame pairs: frames [0..T-2] as support, [1..T-1] as query.
        support = align_x[:, :-1, :, :]
        query = align_x[:, 1:, :, :]
        pairs, aligned_frame, interactive_feature = self.message_attn(support, query, cls_token)
        # Alignment objective between the two halves of each pair.
        cos_loss = cos_similarity_loss(pairs[:, :, :, :self.in_feature_dim], pairs[:, :, :, self.in_feature_dim:])
        feature["cosine"].append(cos_loss)
        aligned_frame = rearrange(aligned_frame, "b t l d -> l (b t) d", l=l, b=B, t=self.T, d=d)
        # Zero-init skips inject the alignment signal into the residual path.
        x = raw_x + self.drop_path(self.temporal_skip_align_fc(aligned_frame))
        x = x + self.drop_path(self.temporal_skip_fc(interactive_feature))
        x = x + self.drop_path(self.attention(self.ln_1(x)))
        x = x[:l, :, :]
        x = x + self.drop_path(self.mlp(self.ln_2(x)))
        feature["input"] = x
        return feature
class Transformer(nn.Module):
    """Stack of ``MultiAxisAttentionBlock``s threading a shared feature dict.

    The dict carries the running activations under ``"input"`` and collects
    the per-block alignment cosine losses under ``"cosine"``.
    """

    def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, droppath=None,
                 use_checkpoint=False, T=8, input_resolution=224, patch_size=16, ):
        super().__init__()
        self.use_checkpoint = use_checkpoint
        self.width = width
        self.layers = layers
        rates = [0.0] * layers if droppath is None else droppath
        blocks = [
            MultiAxisAttentionBlock(width, heads, attn_mask, rates[i], T,
                                    input_resolution, patch_size, idx=i)
            for i in range(layers)
        ]
        self.resblocks = nn.Sequential(*blocks)

    def forward(self, x: torch.Tensor):
        # Package the input so every block can append its cosine loss.
        feature = {"input": x, "cosine": []}
        if self.use_checkpoint:
            # Trade compute for memory: rerun the blocks in 3 checkpointed chunks.
            return checkpoint_sequential(self.resblocks, 3, feature)
        return self.resblocks(feature)
class MultiAxisTransformer(nn.Module):
    """CLIP-style ViT whose blocks additionally perform temporal alignment.

    Patchifies each frame, runs the MultiAxisAttentionBlock stack, and
    returns the projected cls embedding, the raw patch tokens, and the list
    of per-block cosine alignment losses.
    """

    def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int,
                 droppath=None, T=8, use_checkpoint=False, ):
        super().__init__()
        self.T = T
        self.W = input_resolution // patch_size  # patches per side
        self.input_resolution = input_resolution
        self.output_dim = output_dim
        # Non-overlapping patch embedding.
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
        scale = width ** -0.5
        self.class_embedding = nn.Parameter(scale * torch.randn(width))
        self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
        self.ln_pre = LayerNorm(width)
        self.transformer = Transformer(width, layers, heads, droppath=droppath, use_checkpoint=use_checkpoint, T=T, input_resolution=input_resolution, patch_size=patch_size)
        self.ln_post = LayerNorm(width)
        self.proj = nn.Parameter(scale * torch.randn(width, output_dim))

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated normal for linear layers, standard affine init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, x: torch.Tensor):
        """x: (B*T, 3, H, W) frames -> (cls embedding, patch tokens, losses)."""
        x = self.conv1(x)
        x = x.reshape(x.shape[0], x.shape[1], -1)
        x = x.permute(0, 2, 1)
        # Prepend the class token and add positional embeddings.
        x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)
        x = x + self.positional_embedding.to(x.dtype)
        x = self.ln_pre(x)
        x = x.permute(1, 0, 2)  # NLD -> LND for the transformer
        feature = self.transformer(x)
        x = feature["input"]
        x = x.permute(1, 0, 2)  # LND -> NLD
        cls_x = self.ln_post(x[:, 0, :])
        if self.proj is not None:
            cls_x = cls_x @ self.proj
        return cls_x, x[:, 1:, :], feature["cosine"]
| 11,947 | 38.17377 | 173 | py |
ILA | ILA-master/models/metrics.py | import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
def timewise_cos(x, y):
    """Sum over batch and time of (1 - cosine similarity) between x and y.

    Both inputs are (L, B, T, C); each (batch, frame) slot is flattened and
    L2-normalised before the dot product, so the result is a scalar tensor.
    """
    seq_len, batch, frames, channels = x.size()
    # l b t c -> b t l c (pure permutation, matching the einops form)
    x = x.permute(1, 2, 0, 3).squeeze()
    y = y.permute(1, 2, 0, 3).squeeze()
    x = F.normalize(x.reshape(batch, frames, -1), dim=-1, p=2)
    y = F.normalize(y.reshape(batch, frames, -1), dim=-1, p=2)
    # Cosine distance per (batch, frame), reduced to a scalar.
    return (1 - (x * y).sum(-1)).sum(-1).sum(-1)
def cos_similarity_loss(x, y):
    """Mean cosine-embedding loss pulling x towards y.

    Inputs are (L, B, T, C); each of the B*T slots is flattened and
    L2-normalised, and ``nn.CosineEmbeddingLoss`` with target +1 penalises
    dissimilar pairs.
    """
    seq_len, batch, frames, channels = x.size()
    # l b t c -> (b t) l c (matching the einops form)
    x = x.permute(1, 2, 0, 3).reshape(batch * frames, seq_len, channels).squeeze()
    y = y.permute(1, 2, 0, 3).reshape(batch * frames, seq_len, channels).squeeze()
    x = F.normalize(x.reshape(batch * frames, -1), dim=-1, p=2)
    y = F.normalize(y.reshape(batch * frames, -1), dim=-1, p=2)
    # Target of +1 for every pair: maximise cosine similarity.
    target = torch.ones(batch * frames, device=x.device)
    criterion = nn.CosineEmbeddingLoss(reduction='mean')
    return criterion(x, y, target)
if __name__ == '__main__':
    # Sanity check: identical inputs should give zero cosine distance.
    a = torch.ones(1, 8, 16, 768)
    b = torch.ones(1, 8, 16, 768)
    print(timewise_cos(a, b))
    print(cos_similarity_loss(a, b))
ILA | ILA-master/models/mit.py | import torch
from torch import nn
from collections import OrderedDict
from timm.models.layers import trunc_normal_
import sys
sys.path.append("../")
from clip.model import QuickGELU
class ResidualAttentionBlock(nn.Module):
    """Pre-norm transformer block: self-attention and MLP, each residual."""

    def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
        super().__init__()
        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = nn.LayerNorm(d_model)
        # 4x expansion MLP with CLIP's QuickGELU activation.
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = nn.LayerNorm(d_model)
        self.attn_mask = attn_mask

    def attention(self, x: torch.Tensor):
        # Keep the (optional) mask on the activations' dtype/device.
        mask = self.attn_mask
        if mask is not None:
            mask = mask.to(dtype=x.dtype, device=x.device)
        self.attn_mask = mask
        return self.attn(x, x, x, need_weights=False, attn_mask=mask)[0]

    def forward(self, x: torch.Tensor):
        x = x + self.attention(self.ln_1(x))
        return x + self.mlp(self.ln_2(x))
class MultiframeIntegrationTransformer(nn.Module):
    """Fuses per-frame embeddings into clip-level class logits.

    Adds learnable temporal positions, runs a small residual-attention
    stack (with a residual around the whole stack), averages over frames,
    L2-normalises, and classifies.
    """

    def __init__(self, T, embed_dim=512, layers=1, num_classes=400):
        super().__init__()
        self.T = T
        transformer_heads = embed_dim // 64
        # One positional embedding per frame index.
        self.positional_embedding = nn.Parameter(torch.empty(1, T, embed_dim))
        trunc_normal_(self.positional_embedding, std=0.02)
        self.resblocks = nn.Sequential(*[ResidualAttentionBlock(d_model=embed_dim, n_head=transformer_heads) for _ in range(layers)])
        self.backbone_feature_dim = embed_dim
        self.num_classes = num_classes
        self.classify_head = nn.Sequential(
            nn.LayerNorm(self.backbone_feature_dim),
            nn.Dropout(0.5),
            nn.Linear(self.backbone_feature_dim, self.num_classes),
        )
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated normal for linear layers, standard affine init for LayerNorm.
        if isinstance(m, (nn.Linear,)):
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.LayerNorm):
            nn.init.zeros_(m.bias)
            nn.init.ones_(m.weight)

    def forward(self, x):
        """x: (B, T, C) per-frame embeddings -> (B, num_classes) logits."""
        ori_x = x
        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND for nn.MultiheadAttention
        x = self.resblocks(x)
        x = x.permute(1, 0, 2)
        # Residual connection around the temporal transformer.
        x = x.type(ori_x.dtype) + ori_x
        x = x.mean(dim=1, keepdim=False)
        x = x / x.norm(dim=-1, keepdim=True)
        x = self.classify_head(x)
        return x
        # return x.mean(dim=1, keepdim=False)
# return x.mean(dim=1, keepdim=False)
if __name__ == '__main__':
    # Quick shape check on random clip-level features.
    mit = MultiframeIntegrationTransformer(embed_dim=768, T=8)
    feats = torch.randn(8, 8, 768)
    logits = mit(feats)
    print(logits.size())
    print(1.e-2)
ILA | ILA-master/models/prompt.py | from timm.models.layers import trunc_normal_
import torch
from torch import nn
import sys
sys.path.append("../")
from clip.model import QuickGELU
class MulitHeadAttention(nn.Module):
    """Multi-head cross-attention with separate q/k/v linear projections.

    (Class name kept as-is — including the typo — since callers reference it.)
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scaling is 1/sqrt(head_dim) unless explicitly overridden.
        self.scale = qk_scale or head_dim ** -0.5
        self.q_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.k_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.v_proj = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, q, k, v):
        batch, n_q, dim = q.shape
        n_kv = k.shape[1]
        heads = self.num_heads
        head_dim = dim // heads
        # Project and split into heads: (B, H, N, head_dim).
        q = self.q_proj(q).reshape(batch, n_q, heads, head_dim).permute(0, 2, 1, 3)
        k = self.k_proj(k).reshape(batch, n_kv, heads, head_dim).permute(0, 2, 1, 3)
        v = self.v_proj(v).reshape(batch, n_kv, heads, head_dim).permute(0, 2, 1, 3)
        # Scaled dot-product attention, softmax over keys.
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = self.attn_drop(attn.softmax(dim=-1))
        out = (attn @ v).transpose(1, 2).reshape(batch, n_q, dim)
        return self.proj_drop(self.proj(out))
class PromptGeneratorLayer(nn.Module):
    """One decoder layer: text attends into visual features, then an MLP."""

    def __init__(
        self,
        d_model,
        nhead,
        dropout=0.,
    ):
        super().__init__()
        self.cross_attn = MulitHeadAttention(d_model, nhead, proj_drop=dropout)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        # 4x expansion MLP with QuickGELU, as in CLIP.
        self.mlp = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            QuickGELU(),
            nn.Dropout(dropout),
            nn.Linear(d_model * 4, d_model)
        )

    def forward(self, x, visual):
        # Normalised text queries attend into the visual tokens.
        queries = self.norm1(x)
        x = x + self.cross_attn(queries, visual, visual)
        return x + self.dropout(self.mlp(self.norm3(x)))
class VideoSpecificPrompt(nn.Module):
    """Generates a video-conditioned residual for text embeddings.

    A small decoder lets the text tokens attend to normalised visual
    features; the result is scaled by a learnable per-channel ``alpha``
    (initialised small so training starts near the plain text embedding).
    """

    def __init__(self, layers=2, embed_dim=512, alpha=0.1,):
        super().__init__()
        self.norm = nn.LayerNorm(embed_dim)
        self.decoder = nn.ModuleList([PromptGeneratorLayer(embed_dim, embed_dim // 64) for _ in range(layers)])
        self.alpha = nn.Parameter(torch.ones(embed_dim) * alpha)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Truncated normal for linear layers, standard affine init for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(self, text, visual):
        visual = self.norm(visual)
        for layer in self.decoder:
            text = layer(text, visual)
        return self.alpha * text
ILA | ILA-master/models/temporal_shift.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from einops import rearrange, reduce, repeat
import torchvision
from models.mat import MultiAxisTransformer
class TemporalShift(nn.Module):
    """Wraps a module with TSM-style channel shifting along the time axis.

    Before ``net`` runs, 1/n_div of the channels are shifted one frame
    forward, another 1/n_div one frame back (vacated slots zero-filled),
    and the remainder is left in place.
    """

    def __init__(self, net, n_segment=3, n_div=8, inplace=False):
        super(TemporalShift, self).__init__()
        self.net = net
        self.n_segment = n_segment
        self.fold_div = n_div
        self.inplace = inplace
        if inplace:
            print('=> Using in-place shift...')
        print('=> Using fold div: {}'.format(self.fold_div))

    def forward(self, x):
        shifted = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace)
        return self.net(shifted)

    @staticmethod
    def shift(x, n_segment, fold_div=3, inplace=False):
        """x: (batch*n_segment, C, H, W) frame features."""
        if inplace:
            raise NotImplementedError
        bt, c, h, w = x.size()
        frames = x.view(bt // n_segment, n_segment, c, h, w)
        fold = c // fold_div
        out = torch.zeros_like(frames)
        out[:, :-1, :fold] = frames[:, 1:, :fold]                      # shift left
        out[:, 1:, fold: 2 * fold] = frames[:, :-1, fold: 2 * fold]    # shift right
        out[:, :, 2 * fold:] = frames[:, :, 2 * fold:]                 # not shifted
        return out.view(bt, c, h, w)
class TemporalShiftVit(nn.Module):
    """TSM-style channel shift for ViT token sequences of shape (L, B*T, D).

    The cls token is left untouched; patch tokens are reshaped to their 2D
    grid (assumed square) and a fraction of the channels is shifted one
    frame left/right before the wrapped module runs.
    """

    def __init__(self, net, n_segment=3, n_div=8, inplace=False):
        super(TemporalShiftVit, self).__init__()
        self.net = net
        self.n_segment = n_segment
        self.fold_div = n_div
        self.inplace = inplace
        # self.residual_fc = nn.Linear(d_model, d_model)
        # self.residual_fc.weight.data.zero_()
        # self.residual_fc.bias.data.zero_()
        if inplace:
            print('=> Using in-place shift...')
        print('=> Using fold div: {}'.format(self.fold_div))

    def forward(self, x):
        x = self.shift(x, self.n_segment, fold_div=self.fold_div, inplace=self.inplace)
        x = self.net(x)
        return x

    @staticmethod
    def shift(x, n_segment, fold_div=3, inplace=False):
        # x: (1 + H*W, B*T, D); cls token is split off and re-attached at the end.
        hw, bt, d = x.size()
        cls_tokens = x[0, :, :].unsqueeze(0)
        x = x[1:, :, :]
        # x = x.permute(1, 2, 0) # bt, d, hw
        x = rearrange(x, "l bt d -> bt d l", bt=bt, l=hw-1, d=d)
        n_batch = bt // n_segment
        # Assumes a square patch grid (H == W).
        h = int(np.sqrt(hw-1))
        w = h
        x = rearrange(x, "(b t) d (h w) -> b t d h w", b=n_batch, t=n_segment, d=d, h=h, w=w)
        # x = x.contiguous().view(n_batch, n_segment, d, h, w)
        fold = d // fold_div
        if inplace:
            raise NotImplementedError
        else:
            out = torch.zeros_like(x)
            out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
            out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
            out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
            # out = out.contiguous().view(bt, d, h*w)
            # out = out.permute(2, 0, 1)
            out = rearrange(out, "b t d h w -> (h w) (b t) d", b=n_batch, t=n_segment, d=d, h=h, w=w)
            out = torch.cat([cls_tokens, out], dim=0)
        return out
class InplaceShift(torch.autograd.Function):
    """Memory-saving temporal shift that mutates the input buffer in place.

    Forward shifts the first ``fold`` channels one frame left and the next
    ``fold`` channels one frame right, writing through ``input.data``;
    backward applies the transposed shift to the incoming gradient. Only a
    single (n, t, fold, h, w) scratch buffer is allocated.
    """
    # Special thanks to @raoyongming for the help to this function
    @staticmethod
    def forward(ctx, input, fold):
        # not support higher order gradient
        # input = input.detach_()
        ctx.fold_ = fold
        n, t, c, h, w = input.size()
        buffer = input.data.new(n, t, fold, h, w).zero_()
        # Shift the first `fold` channels one frame left (last frame -> zeros).
        buffer[:, :-1] = input.data[:, 1:, :fold]
        input.data[:, :, :fold] = buffer
        buffer.zero_()
        # Shift the next `fold` channels one frame right (first frame -> zeros).
        buffer[:, 1:] = input.data[:, :-1, fold: 2 * fold]
        input.data[:, :, fold: 2 * fold] = buffer
        return input

    @staticmethod
    def backward(ctx, grad_output):
        # grad_output = grad_output.detach_()
        fold = ctx.fold_
        n, t, c, h, w = grad_output.size()
        buffer = grad_output.data.new(n, t, fold, h, w).zero_()
        # Transposed shifts: left-shift in forward becomes right-shift here.
        buffer[:, 1:] = grad_output.data[:, :-1, :fold]
        grad_output.data[:, :, :fold] = buffer
        buffer.zero_()
        buffer[:, :-1] = grad_output.data[:, 1:, fold: 2 * fold]
        grad_output.data[:, :, fold: 2 * fold] = buffer
        # No gradient w.r.t. `fold`.
        return grad_output, None
class TemporalPool(nn.Module):
    """Halves the temporal resolution with a stride-2 max pool, then runs ``net``."""

    def __init__(self, net, n_segment):
        super(TemporalPool, self).__init__()
        self.net = net
        self.n_segment = n_segment

    def forward(self, x):
        pooled = self.temporal_pool(x, n_segment=self.n_segment)
        return self.net(pooled)

    @staticmethod
    def temporal_pool(x, n_segment):
        """x: (batch*n_segment, D, H, W) -> (batch*n_segment//2, D, H, W)."""
        bt, d, h, w = x.size()
        batch = bt // n_segment
        # (b*t, d, h, w) -> (b, d, t, h, w) so we can pool over time.
        clip = x.view(batch, n_segment, d, h, w).transpose(1, 2)
        clip = F.max_pool3d(clip, kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
        # Back to frame-major layout with half the frames.
        return clip.transpose(1, 2).contiguous().view(bt // 2, d, h, w)
def make_temporal_shift_vit(net, n_segment, n_div=8, place='block', temporal_pool=False):
    """Wrap every transformer block of ``net`` in a ``TemporalShiftVit``.

    Args:
        net: the model to modify; must be a ``MultiAxisTransformer``.
        n_segment: number of frames per clip.
        n_div: fraction denominator of channels to shift (see TemporalShiftVit).
        place: wrapping granularity; only 'block' is implemented.
        temporal_pool: if True, later stages are assumed to see half the frames.

    Raises:
        NotImplementedError: for an unsupported ``place`` or network type.
    """
    if temporal_pool:
        # After temporal pooling, later stages see half the frames.
        n_segment_list = [n_segment, n_segment // 2, n_segment // 2, n_segment // 2]
    else:
        n_segment_list = [n_segment] * 4
    assert n_segment_list[-1] > 0
    print('=> n_segment per stage: {}'.format(n_segment_list))
    if not isinstance(net, MultiAxisTransformer):
        # Fix: previously an unsupported net type was silently left unmodified,
        # masking configuration errors; fail loudly instead.
        raise NotImplementedError('unsupported network type: {}'.format(type(net).__name__))
    if place != 'block':
        raise NotImplementedError(place)

    def make_block_temporal(stage, this_segment):
        # Rebuild the block sequence with every block wrapped for shifting.
        blocks = list(stage.children())
        print('=> Processing stage with {} blocks'.format(len(blocks)))
        wrapped = [TemporalShiftVit(b, n_segment=this_segment, n_div=n_div) for b in blocks]
        return nn.Sequential(*wrapped)

    net.transformer.resblocks = make_block_temporal(net.transformer.resblocks, n_segment_list[0])
| 5,893 | 34.506024 | 105 | py |
ILA | ILA-master/models/xclip.py | import os
from collections import OrderedDict
from typing import Tuple, Union
import torch
from torch import nn
import numpy as np
from .mat import MultiAxisTransformer
from .mit import MultiframeIntegrationTransformer
from .prompt import VideoSpecificPrompt
import sys
import warnings
sys.path.append("../")
from clip.model import CLIP,LayerNorm,Transformer
import clip
# Default on-disk locations of the pretrained CLIP checkpoints, keyed by
# architecture name; used by `load` when no explicit model_path is given.
MODEL_PATH = {
    "ViT-B/32": "/PATH/TO/ViT-B-32.pt",
    "ViT-B/16": "/PATH/TO/ViT-B-16.pt",
    "ViT-L/14": "/PATH/TO/ViT-L-14.pt",
    "ViT-L/14@336px": "/PATH/TO/ViT-L-14-336px.pt"
}
def load_state_dict_time(checkpoint_path, use_ema=False):
    """Load a model state dict from ``checkpoint_path``.

    Supports three checkpoint layouts: a dict with a ``state_dict`` entry
    (or ``state_dict_ema`` when ``use_ema`` is set) whose ``module.``
    (DataParallel) key prefixes are stripped; a dict with a ``model_state``
    entry whose ``model`` prefixes are stripped; or a bare state dict /
    arbitrary object, which is returned unchanged.

    Raises:
        FileNotFoundError: if ``checkpoint_path`` is empty or not a file.
    """
    if not (checkpoint_path and os.path.isfile(checkpoint_path)):
        print("No checkpoint found at '{}'".format(checkpoint_path))
        raise FileNotFoundError()
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    # Fix: the original probed `key in checkpoint` even when torch.load
    # returned a non-dict object (e.g. a bare tensor), which raises an
    # unrelated error; return such checkpoints unchanged instead.
    if not isinstance(checkpoint, dict):
        print("Loaded checkpoint '{}'".format(checkpoint_path))
        return checkpoint
    state_dict_key = 'state_dict'
    if use_ema and 'state_dict_ema' in checkpoint:
        state_dict_key = 'state_dict_ema'
    if state_dict_key in checkpoint:
        # strip the `module.` prefix left behind by DataParallel wrappers
        state_dict = OrderedDict(
            (k[7:] if k.startswith('module') else k, v)
            for k, v in checkpoint[state_dict_key].items()
        )
    elif 'model_state' in checkpoint:
        state_dict_key = 'model_state'
        # strip the `model.` prefix
        state_dict = OrderedDict(
            (k[6:] if k.startswith('model') else k, v)
            for k, v in checkpoint[state_dict_key].items()
        )
    else:
        # A bare state dict saved directly.
        state_dict = checkpoint
    print("Loaded {} from checkpoint '{}'".format(state_dict_key, checkpoint_path))
    return state_dict
class XCLIP(CLIP):
    """CLIP variant for video: a MultiAxisTransformer visual tower plus a
    MultiframeIntegrationTransformer (MIT) head producing class logits.

    The text branch and prompt generator of the original X-CLIP are disabled
    in this variant (kept commented out for reference); ``forward`` returns
    the MIT logits together with the per-block alignment cosine losses.
    """

    def __init__(self,
                 embed_dim: int,
                 # vision
                 image_resolution: int,
                 vision_layers: Union[Tuple[int, int, int, int], int],
                 vision_width: int,
                 vision_patch_size: int,
                 # text
                 context_length: int,
                 vocab_size: int,
                 transformer_width: int,
                 transformer_heads: int,
                 transformer_layers: int,
                 # video
                 T=8,
                 droppath=0.,
                 mit_layers=1,
                 # prompt
                 prompts_alpha=1e-4,
                 prompts_layers=1,
                 # other
                 use_cache=True,
                 use_checkpoint=False,
                 ):
        super().__init__(
            embed_dim,
            image_resolution, vision_layers, vision_width, vision_patch_size,
            context_length, vocab_size, transformer_width, transformer_heads, transformer_layers
        )
        # self.prompts_generator = VideoSpecificPrompt(layers=prompts_layers, embed_dim=embed_dim, alpha=prompts_alpha,)
        # self.use_cache = use_cache
        self.mit = MultiframeIntegrationTransformer(T=T, embed_dim=embed_dim, layers=mit_layers,)
        # Stochastic-depth rates, linearly increasing with depth.
        dpr = [x.item() for x in torch.linspace(0, droppath, vision_layers)] if droppath > 0. else None
        vision_heads = vision_width // 64
        # Replace CLIP's visual tower with the alignment-aware transformer.
        self.visual = MultiAxisTransformer(
            input_resolution=image_resolution,
            patch_size=vision_patch_size,
            width=vision_width,
            layers=vision_layers,
            heads=vision_heads,
            output_dim=embed_dim,
            droppath=dpr,
            T=T,
            use_checkpoint=use_checkpoint,
        )
        # The original X-CLIP text transformer / token embedding / prompt
        # caches are unused in this variant and kept only for reference.
        # self.transformer = Transformer(
        #     width=transformer_width,
        #     layers=transformer_layers,
        #     heads=transformer_heads,
        #     attn_mask=self.build_attention_mask()
        # )
        # self.vocab_size = vocab_size
        # self.token_embedding = nn.Embedding(vocab_size, transformer_width)
        # self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
        # self.ln_final = LayerNorm(transformer_width)
        # self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
        # self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
        #
        # self.cache_text_features = None
        # self.prompts_visual_ln = LayerNorm(vision_width)
        # self.prompts_visual_proj = nn.Parameter(torch.randn(vision_width, embed_dim))
        # self.initialize_parameters()

    @torch.jit.ignore
    def no_weight_decay_keywords(self):
        """Parameter name keywords excluded from weight decay."""
        return {'positional_embedding'}

    # def initialize_parameters(self):
    #     nn.init.normal_(self.token_embedding.weight, std=0.02)
    #     nn.init.normal_(self.positional_embedding, std=0.01)
    #
    #     proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
    #     attn_std = self.transformer.width ** -0.5
    #     fc_std = (2 * self.transformer.width) ** -0.5
    #     for block in self.transformer.resblocks:
    #         nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
    #         nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
    #         nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
    #         nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
    #
    #     if self.text_projection is not None:
    #         nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)

    def encode_image(self, image):
        """Run the visual tower; returns (cls features, patch tokens, losses)."""
        return self.visual(image)

    # def encode_text(self, text):
    #     x = self.token_embedding(text)
    #     eos_indx = text.argmax(dim=-1)
    #     K, N1, C = x.shape
    #
    #     x = x + self.positional_embedding
    #     x = x.permute(1, 0, 2)  # NLD -> LND
    #     x = self.transformer(x)
    #     x = x.permute(1, 0, 2)  # LND -> NLD
    #     x = self.ln_final(x)
    #     # x.shape = [batch_size, n_ctx, transformer.width]
    #     # take features from the eot embedding (eot_token is the highest number in each sequence)
    #     x = x[torch.arange(x.shape[0]), eos_indx] @ self.text_projection
    #     x = x.reshape(K, -1)
    #     return x

    def encode_video(self, image):
        """image: (B, T, C, H, W) clip.

        Returns the MIT class logits, the per-frame patch tokens, and the
        list of per-block cosine alignment losses.
        """
        b,t,c,h,w = image.size()
        # Flatten frames into the batch dimension for the image encoder.
        image = image.reshape(-1,c,h,w)
        cls_features, img_features, cos_loss_list = self.encode_image(image)
        # img_features = self.prompts_visual_ln(img_features)
        # img_features = img_features @ self.prompts_visual_proj
        cls_features = cls_features.view(b, t, -1)
        # img_features = img_features.view(b,t,-1,cls_features.shape[-1])
        video_features = self.mit(cls_features)
        return video_features, img_features, cos_loss_list

    # def cache_text(self, text):
    #     self.eval()
    #     with torch.no_grad():
    #         if self.cache_text_features is None:
    #             self.cache_text_features = self.encode_text(text)
    #     self.train()
    #     return self.cache_text_features

    def forward(self, image, text):
        """Returns (class logits, cosine loss list); ``text`` is unused in
        this classification-head variant (text/video matching is disabled)."""
        b = image.shape[0]
        video_features, img_features, cos_loss_list = self.encode_video(image)
        # img_features = img_features.mean(dim=1, keepdim=False)
        # if self.use_cache:
        #     text_features = self.cache_text(text)
        # else:
        #     text_features = self.encode_text(text)
        # text_features = text_features.unsqueeze(0).expand(b, -1, -1)
        # text_features = text_features + self.prompts_generator(text_features, img_features)
        # video_features = video_features / video_features.norm(dim=-1, keepdim=True)
        # text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        # logit_scale = self.logit_scale.exp()
        # logits = torch.einsum("bd,bkd->bk", video_features, logit_scale * text_features)
        logits = video_features
        return logits, cos_loss_list
def build_model(state_dict: dict, T=8, droppath=0., use_checkpoint=False, logger=None, prompts_alpha=1e-1, prompts_layers=2, use_cache=True, mit_layers=4,):
    """Infer the CLIP architecture from ``state_dict``, build an XCLIP model,
    load the pretrained weights (non-strict), and freeze the original CLIP
    visual weights so only the newly added temporal modules train.

    NOTE(review): assumes ``logger`` is provided — crashes if it is None.
    """
    vit = "visual.proj" in state_dict
    if vit:
        # ViT backbone: recover width / depth / patch size from checkpoint shapes.
        vision_width = state_dict["visual.conv1.weight"].shape[0]
        vision_layers = len([k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
        vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
        grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
        image_resolution = vision_patch_size * grid_size
    else:
        # ResNet backbone: per-stage block counts.
        counts: list = [len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
        vision_layers = tuple(counts)
        vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
        output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
        vision_patch_size = None
        assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
        image_resolution = output_width * 32
    # Text-branch hyper-parameters (still required by the CLIP constructor).
    embed_dim = state_dict["text_projection"].shape[1]
    context_length = state_dict["positional_embedding"].shape[0]
    vocab_size = state_dict["token_embedding.weight"].shape[0]
    transformer_width = state_dict["ln_final.weight"].shape[0]
    transformer_heads = transformer_width // 64
    transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
    model = XCLIP(
        embed_dim,
        image_resolution, vision_layers, vision_width, vision_patch_size,
        context_length, vocab_size, transformer_width, transformer_heads, transformer_layers,
        T=T, droppath=droppath, mit_layers=mit_layers,
        prompts_alpha=prompts_alpha, prompts_layers=prompts_layers,
        use_checkpoint=use_checkpoint, use_cache=use_cache,
    )
    # Drop metadata keys that are not parameters.
    for key in ["input_resolution", "context_length", "vocab_size"]:
        if key in state_dict:
            del state_dict[key]
    # Non-strict load: the new temporal modules have no pretrained weights.
    msg = model.load_state_dict(state_dict, strict=False)
    logger.info(f"load pretrained CLIP: {msg}")
    # Freeze the original CLIP visual weights (embeddings, projections, and
    # each block's attn/ln_1/mlp/ln_2); the alignment modules stay trainable.
    frozen_list = ["visual.class_embedding", "visual.positional_embedding", "visual.proj", "visual.conv1.weight", "visual.ln_pre.weight", "visual.ln_pre.bias", "visual.ln_post.weight", "visual.ln_post.bias"]
    for name, param in model.named_parameters():
        for i in range(len(frozen_list)):
            if name == frozen_list[i]:
                param.requires_grad = False
        for i in range(vision_layers):
            if "visual.transformer.resblocks.{}.attn".format(i) in name:
                param.requires_grad = False
            elif "visual.transformer.resblocks.{}.ln_1".format(i) in name:
                param.requires_grad = False
            elif "visual.transformer.resblocks.{}.mlp".format(i) in name:
                param.requires_grad = False
            elif "visual.transformer.resblocks.{}.ln_2".format(i) in name:
                param.requires_grad = False
    return model.eval()
def load(model_path, name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
         jit=True, T=8, droppath=0., use_checkpoint=False, logger=None, use_cache=True, prompts_alpha=1e-1, prompts_layers=2, mit_layers=1,
         ):
    """Build an XCLIP model from a CLIP checkpoint (JIT archive or state dict).

    Returns ``(model, state_dict)``; the model is cast to float on CPU.
    """
    if model_path is None:
        # Fall back to the default checkpoint path for this architecture.
        model_path = MODEL_PATH[name]
    try:
        # A JIT archive loads directly; we only need its state dict.
        jit_model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
        weights = jit_model.state_dict()
    except RuntimeError:
        if jit:
            warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
            jit = False
        weights = torch.load(model_path, map_location="cpu")
    model = build_model(weights, T=T, droppath=droppath,
                        use_checkpoint=use_checkpoint, logger=logger,
                        prompts_alpha=prompts_alpha,
                        prompts_layers=prompts_layers,
                        use_cache=use_cache,
                        mit_layers=mit_layers,
                        )
    if str(device) == "cpu":
        model.float()
    return model, model.state_dict()
ILA | ILA-master/utils/__init__.py | 0 | 0 | 0 | py | |
ILA | ILA-master/utils/config.py | import os
import yaml
from yacs.config import CfgNode as CN
# Root yacs config node. get_config() hands out clones of this tree, so the
# defaults below are never mutated in place.
_C = CN()

# Base config files
_C.BASE = ['']

# ---------------------------------------------------------------------------
# Data settings
# ---------------------------------------------------------------------------
_C.DATA = CN()
_C.DATA.ROOT = ''                 # dataset root directory
_C.DATA.TRAIN_FILE = ''           # annotation list for training
_C.DATA.VAL_FILE = ''             # annotation list for validation
_C.DATA.DATASET = 'kinetics400'
_C.DATA.INPUT_SIZE = 224          # spatial input resolution
_C.DATA.NUM_FRAMES = 8            # frames sampled per clip
_C.DATA.NUM_CLASSES = 400
_C.DATA.LABEL_LIST = 'labels/kinetics_400_labels.csv'

# ---------------------------------------------------------------------------
# Model settings
# ---------------------------------------------------------------------------
_C.MODEL = CN()
_C.MODEL.ARCH = 'ViT-B/32'        # CLIP visual backbone variant
_C.MODEL.DROP_PATH_RATE = 0.
_C.MODEL.PRETRAINED = None        # path to pretrained weights (set via CLI)
_C.MODEL.RESUME = None            # checkpoint to resume from (set via CLI)
_C.MODEL.FIX_TEXT = True          # freeze the text branch during training

# ---------------------------------------------------------------------------
# Training settings
# ---------------------------------------------------------------------------
_C.TRAIN = CN()
_C.TRAIN.EPOCHS = 50
_C.TRAIN.WARMUP_EPOCHS = 5
_C.TRAIN.WEIGHT_DECAY = 0.003
_C.TRAIN.LR = 8.e-6
_C.TRAIN.BATCH_SIZE = 8
_C.TRAIN.ACCUMULATION_STEPS = 1   # gradient accumulation steps
_C.TRAIN.LR_SCHEDULER = 'cosine'
_C.TRAIN.OPTIMIZER = 'adamw'
_C.TRAIN.OPT_LEVEL = 'O1'         # apex/AMP optimization level
_C.TRAIN.AUTO_RESUME = False
_C.TRAIN.USE_CHECKPOINT = False   # gradient checkpointing to save memory

# ---------------------------------------------------------------------------
# Augmentation settings
# ---------------------------------------------------------------------------
_C.AUG = CN()
_C.AUG.LABEL_SMOOTH = 0.1
_C.AUG.COLOR_JITTER = 0.8
_C.AUG.GRAY_SCALE = 0.2
_C.AUG.MIXUP = 0.8
_C.AUG.CUTMIX = 1.0
_C.AUG.MIXUP_SWITCH_PROB = 0.5

# ---------------------------------------------------------------------------
# Testing / misc settings
# ---------------------------------------------------------------------------
_C.TEST = CN()
_C.TEST.NUM_CLIP = 1              # temporal clips per video at test time
_C.TEST.NUM_CROP = 1              # spatial crops per clip at test time
_C.TEST.ONLY_TEST = False
_C.OUTPUT = ''                    # output directory for logs/checkpoints
_C.SAVE_FREQ = 1                  # save checkpoint every N epochs
_C.PRINT_FREQ = 50                # log every N iterations
_C.SEED = 1024
def _update_config_from_file(config, cfg_file):
    """Recursively merge ``cfg_file`` into ``config``.

    Any files listed under the YAML ``BASE`` key are merged first (depth
    first), so the child file's values override those of its bases.
    """
    config.defrost()
    with open(cfg_file, 'r') as stream:
        parsed = yaml.load(stream, Loader=yaml.FullLoader)

    base_dir = os.path.dirname(cfg_file)
    for base_cfg in parsed.setdefault('BASE', ['']):
        if not base_cfg:
            continue
        _update_config_from_file(config, os.path.join(base_dir, base_cfg))
    print('=> merge config from {}'.format(cfg_file))
    config.merge_from_file(cfg_file)
    config.freeze()
def update_config(config, args):
    """Merge the YAML file, option list, and CLI arguments into ``config``.

    Precedence (lowest to highest): defaults -> args.config file ->
    args.opts key/value pairs -> the dedicated CLI flags below.
    """
    _update_config_from_file(config, args.config)
    config.defrost()
    if args.opts:
        config.merge_from_list(args.opts)

    # merge from specific arguments
    # NOTE: these truthiness checks mean falsy CLI values (0, '', False)
    # are treated as "not provided" and leave the config untouched.
    if args.batch_size:
        config.TRAIN.BATCH_SIZE = args.batch_size
    if args.pretrained:
        config.MODEL.PRETRAINED = args.pretrained
    if args.resume:
        config.MODEL.RESUME = args.resume
    if args.accumulation_steps:
        config.TRAIN.ACCUMULATION_STEPS = args.accumulation_steps
    if args.output:
        config.OUTPUT = args.output
    if args.only_test:
        config.TEST.ONLY_TEST = True

    # set local rank for distributed training
    config.LOCAL_RANK = args.local_rank

    config.freeze()
def get_config(args):
    """Get a yacs CfgNode object with default values.

    Clones the module-level default tree ``_C`` and merges the settings
    coming from ``args`` (config file, opts list, CLI flags) into the clone.

    Fix: the original ``return`` line had dataset-export metadata
    ("| 2,596 | 22.1875 | 68 | py") fused onto it, which referenced the
    undefined name ``py`` -- restored to a clean return.
    """
    # Return a clone so that the defaults will not be altered
    # This is for the "local variable" use pattern
    config = _C.clone()
    update_config(config, args)
    return config
ILA | ILA-master/utils/helper.py | import numpy
import torch.distributed as dist
import torch
import clip
import os
class AverageMeter:
    """Track the latest value and the running (weighted) mean of a series.

    Attributes:
        val: most recently recorded value.
        sum: weighted sum of all recorded values.
        count: total weight recorded so far.
        avg: running mean, i.e. ``sum / count``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0

    def update(self, val, n=1):
        """Record ``val`` with weight ``n`` and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def epoch_saving(config, epoch, model, max_accuracy, optimizer, lr_scheduler, logger, working_dir, is_best):
    """Serialize the full training state to ``working_dir``.

    Writes ``ckpt_epoch_<epoch>.pth`` every call and additionally ``best.pth``
    when ``is_best`` is True. The saved dict bundles model/optimizer/scheduler
    state plus the epoch, best accuracy, and config for exact resumption.
    """
    save_state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
        'max_accuracy': max_accuracy,
        'epoch': epoch,
        'config': config,
    }

    save_path = os.path.join(working_dir, f'ckpt_epoch_{epoch}.pth')
    logger.info(f"{save_path} saving......")
    torch.save(save_state, save_path)
    logger.info(f"{save_path} saved !!!")

    if not is_best:
        return
    best_path = os.path.join(working_dir, f'best.pth')
    torch.save(save_state, best_path)
    logger.info(f"{best_path} saved !!!")
def load_checkpoint(config, model, optimizer, lr_scheduler, logger):
    """Resume training state from ``config.MODEL.RESUME``.

    Model weights are always restored (``strict=False`` so checkpoints with
    extra/missing heads still load). Optimizer/scheduler/epoch state is
    restored best-effort.

    Returns:
        ``(start_epoch, max_accuracy)``: epoch to resume from and the best
        accuracy recorded so far; ``(0, 0.)`` when only the model weights
        (or nothing) could be restored.

    Fix: the original used a bare ``except:`` which silently swallowed every
    exception (including KeyboardInterrupt); narrowed to ``Exception`` and
    the failure reason is now logged. Cleanup is deduplicated via ``finally``.
    """
    if os.path.isfile(config.MODEL.RESUME):
        logger.info(f"==============> Resuming form {config.MODEL.RESUME}....................")
        checkpoint = torch.load(config.MODEL.RESUME, map_location='cpu')
        load_state_dict = checkpoint['model']
        msg = model.load_state_dict(load_state_dict, strict=False)
        logger.info(f"resume model: {msg}")
        try:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            start_epoch = checkpoint['epoch'] + 1
            max_accuracy = checkpoint['max_accuracy']
            logger.info(f"=> loaded successfully '{config.MODEL.RESUME}' (epoch {checkpoint['epoch']})")
            return start_epoch, max_accuracy
        except Exception as err:
            # Best-effort resume: keep the restored weights, start from epoch 0.
            logger.info(f"=> failed to restore optimizer/scheduler state: {err}")
            return 0, 0.
        finally:
            # Free the (potentially large) checkpoint tensors either way.
            del checkpoint
            torch.cuda.empty_cache()
    else:
        logger.info(("=> no checkpoint found at '{}'".format(config.MODEL.RESUME)))
        return 0, 0
def auto_resume_helper(output_dir):
    """Return the most recently modified '*pth' checkpoint in ``output_dir``,
    or None when the directory contains no checkpoint files."""
    checkpoints = [name for name in os.listdir(output_dir) if name.endswith('pth')]
    print(f"All checkpoints founded in {output_dir}: {checkpoints}")
    if not checkpoints:
        return None
    full_paths = (os.path.join(output_dir, name) for name in checkpoints)
    latest_checkpoint = max(full_paths, key=os.path.getmtime)
    print(f"The latest checkpoint founded: {latest_checkpoint}")
    return latest_checkpoint
def generate_text(data):
    """Tokenize every class name in ``data.classes`` with CLIP's tokenizer.

    ``data.classes`` is unpacked as (index, class_name) pairs -- TODO confirm
    this matches the dataset definition. Returns the concatenated token-id
    tensor (one 77-token row per class).
    """
    text_aug = f"{{}}"  # identity template: each class name is used verbatim
    classes = torch.cat([clip.tokenize(text_aug.format(c), context_length=77) for i, c in data.classes])

    return classes
| 3,045 | 31.404255 | 108 | py |
ILA | ILA-master/utils/logger.py | import os
import sys
import logging
import functools
from termcolor import colored
@functools.lru_cache()
def create_logger(output_dir, dist_rank=0, name=''):
    """Build a logger writing to ``log_rank{dist_rank}.txt`` in ``output_dir``.

    Rank 0 additionally echoes colorized records to stdout. The lru_cache
    memoizes per (output_dir, dist_rank, name) so repeated calls never attach
    duplicate handlers to the same logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    # Plain format for files, colorized (via termcolor) for the console.
    datefmt = '%Y-%m-%d %H:%M:%S'
    fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'
    color_fmt = colored('[%(asctime)s %(name)s]', 'green') + \
        colored('(%(filename)s %(lineno)d)', 'yellow') + ': %(levelname)s %(message)s'

    # Only the master process writes to the console.
    if dist_rank == 0:
        stream = logging.StreamHandler(sys.stdout)
        stream.setLevel(logging.DEBUG)
        stream.setFormatter(logging.Formatter(fmt=color_fmt, datefmt=datefmt))
        logger.addHandler(stream)

    # Every rank appends to its own log file.
    log_file = os.path.join(output_dir, f'log_rank{dist_rank}.txt')
    file_handler = logging.FileHandler(log_file, mode='a')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    logger.addHandler(file_handler)

    return logger
| 1,203 | 33.4 | 102 | py |
ILA | ILA-master/utils/optimizer.py | import copy
import torch.optim as optim
from timm.scheduler.cosine_lr import CosineLRScheduler
import torch.distributed as dist
def is_main_process():
    # Rank 0 of the initialized torch.distributed process group is the master;
    # requires dist.init_process_group() to have been called.
    return dist.get_rank() == 0
def check_keywords_in_name(name, keywords=()):
    """Return True if any of ``keywords`` occurs as a substring of ``name``.

    Replaces the original manual flag loop with ``any``, which also
    short-circuits on the first match instead of scanning all keywords.
    """
    return any(keyword in name for keyword in keywords)
def set_weight_decay(model, skip_list=(), skip_keywords=(), weight_decay=0.001, lr=2e-6, have=(), not_have=()):
    """Split trainable parameters into two optimizer param groups.

    Parameters that are 1-D, named ``*.bias``, listed in ``skip_list``, or
    matching ``skip_keywords`` go into a zero-weight-decay group; everything
    else gets ``weight_decay``. ``have``/``not_have`` act as substring
    include/exclude filters on the parameter names.

    Returns:
        [decayed_group, no_decay_group] dicts for torch.optim.
    """
    has_decay, no_decay = [], []

    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights never enter the optimizer
        if len(have) > 0 and not check_keywords_in_name(name, have):
            continue
        if len(not_have) > 0 and check_keywords_in_name(name, not_have):
            continue
        skip_decay = (len(param.shape) == 1
                      or name.endswith(".bias")
                      or name in skip_list
                      or check_keywords_in_name(name, skip_keywords))
        (no_decay if skip_decay else has_decay).append(param)

    return [{'params': has_decay, 'weight_decay': weight_decay, 'lr': lr},
            {'params': no_decay, 'weight_decay': 0., 'lr': lr}]
def fix_text(model):
    """Freeze every parameter except those of the visual branch, the MIT
    head, and prompt modules (matched by substring on the parameter name)."""
    trainable_markers = ("visual.", "mit", "prompts")
    for name, param in model.named_parameters():
        if any(marker in name for marker in trainable_markers):
            continue
        param.requires_grad = False
def build_optimizer(config, model):
    """Build an AdamW optimizer with per-module learning rates.

    Three parameter groups are created (message_, temporal_, mit), each with
    its own lr/weight-decay; the commented-out clip/prompts groups are kept
    as a record of previously tried configurations.
    """
    model = model.module if hasattr(model, 'module') else model

    # fix text
    if config.MODEL.FIX_TEXT:
        fix_text(model)

    # set decay and lr
    skip = {}
    skip_keywords = {}
    if hasattr(model, 'no_weight_decay'):
        skip = model.no_weight_decay()
    if hasattr(model, 'no_weight_decay_keywords'):
        skip_keywords = model.no_weight_decay_keywords()

    # clip_parameters = set_weight_decay(model, skip, skip_keywords,
    #                                    weight_decay=config.TRAIN.WEIGHT_DECAY, lr=config.TRAIN.LR,
    #                                    have=(), not_have=("prompts", "mit", "message_", "temporal_")
    #                                    )
    # "message_" modules: their own fixed lr / decay, independent of config.
    learning_rate_msg = 5.e-4
    weight_decay_msg = 1.e-2
    msg_parameters = set_weight_decay(model, skip, skip_keywords,
                                      weight_decay=weight_decay_msg, lr=learning_rate_msg,
                                      have=("message_",), not_have=()
                                      )
    # "temporal_" modules: 10x the base lr.
    weight_decay = 0.001
    temporal_parameters = set_weight_decay(model, skip, skip_keywords,
                                           weight_decay=weight_decay, lr=config.TRAIN.LR * 10,
                                           have=("temporal_",), not_have=()
                                           )
    # "mit" head: fixed lr, config-driven weight decay.
    learning_rate_mit = 4e-4
    mit_parameters = set_weight_decay(model, skip, skip_keywords,
                                      weight_decay=config.TRAIN.WEIGHT_DECAY, lr=learning_rate_mit,
                                      have=("mit",), not_have=()
                                      )
    # prompts_parameters = set_weight_decay(model, skip, skip_keywords,
    #                                       weight_decay=config.TRAIN.WEIGHT_DECAY, lr=learning_rate_mit,
    #                                       have=("prompts",), not_have=()
    #                                       )

    # optimizer = optim.AdamW(mit_parameters + msg_parameters + temporal_parameters + clip_parameters + prompts_parameters,
    #                         betas=(0.9, 0.98), eps=1e-8,)
    optimizer = optim.AdamW(msg_parameters + temporal_parameters + mit_parameters, betas=(0.9, 0.98), eps=1e-8, )
    return optimizer
def build_scheduler(config, optimizer, n_iter_per_epoch):
    """Create a cosine learning-rate schedule with linear warmup.

    Args:
        config: yacs config providing TRAIN.EPOCHS, TRAIN.WARMUP_EPOCHS
            and TRAIN.LR.
        optimizer: optimizer whose learning rate is scheduled.
        n_iter_per_epoch: optimizer steps per epoch; the schedule is stepped
            per iteration (``t_in_epochs=False``).

    Returns:
        A timm ``CosineLRScheduler`` decaying to LR/100 over the full run.

    Fix: the original ``return`` line had dataset-export metadata
    ("| 3,646 | 33.733333 | 123 | py") fused onto it -- restored to a clean
    return.
    """
    num_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch)
    warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch)

    lr_scheduler = CosineLRScheduler(
        optimizer,
        t_initial=num_steps,
        lr_min=config.TRAIN.LR / 100,
        warmup_lr_init=0,
        warmup_t=warmup_steps,
        cycle_limit=1,
        t_in_epochs=False,
    )
    return lr_scheduler
null | Multi-domain-learning-FAS-main/README.md | # SiW-Mv2 Dataset and Multi-domain FAS
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/figures/dataset_gallery.png" alt="drawing" width="1000"/>
</p>
This project page contains **S**poof **i**n **W**ild with **M**ultiple Attacks **V**ersion 2 (SiW-Mv2) dataset and the official implementation of our ECCV2022 oral paper "Multi-domain Learning for Updating Face Anti-spoofing Models". [[Arxiv]](https://arxiv.org/pdf/2208.11148.pdf) [[SiW-Mv2 Dataset]](http://cvlab.cse.msu.edu/pdfs/guo_liu_jain_liu_eccv2022_supp.pdf)
Authors: [Xiao Guo](https://scholar.google.com/citations?user=Gkc-lAEAAAAJ&hl=en), [Yaojie Liu](https://yaojieliu.github.io/), [Anil Jain](https://www.cse.msu.edu/~jain/), [Xiaoming Liu](http://cvlab.cse.msu.edu/)
👏 **Our algorithm has been officially accepted and delivered to the [IAPRA ODIN](https://www.iarpa.gov/research-programs/odin) program**!
🔥🔥**Check out our quick demo:**
<p float="left">
<img src="source_SiW_Mv2/figures/demo_1.gif" width="300" height="200"/>
<img src="source_SiW_Mv2/figures/demo_2.gif" width="300" height="200"/>
</p>
<p float="left">
<img src="source_SiW_Mv2/figures/demo_3.gif" width="300" height="200"/>
<img src="source_SiW_Mv2/figures/demo_4.gif" width="300" height="200"/>
</p>
The quick view on the code structure.
```bash
./Multi-domain-learning-FAS
├── source_SiW_Mv2 (The spoof detection baseline source code, pre-trained weights and protocol partition files,.)
├── source_multi_domain (The multi-domain updating source code)
└── DRA_form_SIWMv2.pdf (Dataset Release Agreement)
```
Note that the spoof detection baseline is described in the supplementary section of [[Arxiv](https://arxiv.org/pdf/2208.11148.pdf).]
## 1. SiW-Mv2 Introduction:
> Introduction: **SiW-Mv2 Dataset** is a large-scale face anti-spoofing (FAS) dataset that was first introduced in the multi-domain FAS updating algorithm. The SiW-Mv2 dataset includes 14 spoof attack types, and these spoof attack types are designated and verified by the IARPA ODIN program. Also, SiW-Mv2 is a *privacy-aware* dataset: ALL live subjects in the SiW-Mv2 dataset have signed a consent form that restricts the dataset usage to research purposes. More details can be found on this [page](https://github.com/CHELSEA234/Multi-domain-learning-FAS/tree/main/source_SiW_Mv2) and in the [[paper]](http://cvlab.cse.msu.edu/pdfs/guo_liu_jain_liu_eccv2022_supp.pdf).
## 2. SiW-Mv2 Protocols:
To set a baseline for future study on SiW-Mv2, we define three protocols. Note the partition file for each protocol is fixed, which can be found in `./source_SiW_Mv2/pro_3_text/` of [Dataset Sec.1](https://github.com/CHELSEA234/Multi-domain-learning-FAS/tree/main/source_SiW_Mv2#1-setup-the-environment).
- Protocol I: *Known Spoof Attack Detection*. We divide live subjects and subjects of each spoof pattern into train and test splits. We train the model on the training split and report the overall performance on the test split.
- Protocol II: *Unknown Spoof Attack Detection*. We follow the leave-one-out paradigm — keep $13$ spoof attacks and $80$% of the live subjects as the train split, and use the remaining spoof attack and the left-out $20$% of the live subjects as the test split. We report the test split performance for each individual spoof attack, as well as the averaged performance with standard deviation.
- Protocol III: *Cross-domain Spoof Detection*. We partition SiW-Mv2 into $5$ sub-datasets, where each sub-dataset represents a novel spoof type, a different age and ethnicity distribution, as well as new illuminations. We train the model on the source domain dataset, and evaluate it on the test splits of the $5$ different domains. Per-sub-dataset performance, and the averaged performance with standard deviation, are reported.
## 3. Baseline Performance
- We implement SRENet as the baseline model, and evaluate this SRENet on three SiW-Mv2 protocols. Please find the details in [[paper]](http://cvlab.cse.msu.edu/pdfs/guo_liu_jain_liu_eccv2022_supp.pdf).
- To quick reproduce the following numerical numbers with `.csv` result files, please go to [Dataset Sec.2](https://github.com/CHELSEA234/Multi-domain-learning-FAS/tree/main/source_SiW_Mv2#2-quick-usage).
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/figures/baseline_performance.png" alt="drawing" width="600"/>
</p>
- In `./source_SiW_Mv2`, we provide detailed dataset preprocessing steps as well as the training scripts.
<p align="center">
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/figures/train_tb.png" alt="drawing" width="500"/>
<img src="https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/source_SiW_Mv2/figures/intermediate_result.png" alt="drawing" width="300"/>
</p>
## 4. Baseline Pre-trained Weights
- Also, pre-trained weights for $3$ different protocols can be found in this [page](https://drive.google.com/drive/folders/106TrDEeH-OOfPP4cWketphMJGXtE9sgW?usp=sharing).
| Protocol | Unknown | Download | Protocol | Unknown | Download | Protocol | Unknown | Download |
|:----:|:--------:|:----:|:----:|:--------:|:----:|:----:|:--------:|:----:|
|I|N/A|[link](https://drive.google.com/drive/folders/1fSoF-Xy1DajQvIdnO8LQtEi-waXr6OaW?usp=sharing)|II|Partial Eyes|[link](https://drive.google.com/drive/folders/1AS6J0aYIUNEv6wkEf_XLWlhqncxIptfi?usp=sharing)|II|Transparent|[link](https://drive.google.com/drive/folders/1S-Pm-iAtYdr2EBgl6qhvOmHKdwcdVw3s?usp=sharing)|
|II|Full Mask|[link](https://drive.google.com/drive/folders/1m2kvmlzOySLISlbuBe3izPazev-IO30J?usp=sharing)|II|Paper Mask|[link](https://drive.google.com/drive/folders/1ng5ax86y_Gvh_DYGJvScPW7bEzA7lY9e?usp=sharing)|II|Obfuscation|[link](https://drive.google.com/drive/folders/1PI_NdjzDsLelU8nyLRTrbYZrFA_X-k-p?usp=sharing)|
|II|Cosmetic|[link](https://drive.google.com/drive/folders/1ck0uDRvTFSzYJUwkMYZyu0KSv046-G6k?usp=sharing)|II|Paper glass|[link](https://drive.google.com/drive/folders/1nOvApxLV5t1IUSxboK0w4RtymHj6sMQ8?usp=sharing)|II|Print|[link](https://drive.google.com/drive/folders/1OlWB0MKjXrrx5Q6UkWVWkygjPNbZ_4ol?usp=sharing)|
|II|Impersonate|[link](https://drive.google.com/drive/folders/1Lt-_h3vqfVJ2f_vtOzr2oOKTVnyve2oz?usp=sharing)|II|Silicone|[link](https://drive.google.com/drive/folders/1bplxEU4G_qs5P9Udy3G3c12FmJC_6kkE?usp=share_link)|II|Replay|[link](https://drive.google.com/drive/folders/1Kkp5awJMvteEGe-9772ms3s3qxH_jj4N?usp=sharing)|
|II|FunnyEyes|[link](https://drive.google.com/drive/folders/1Fs4GxiUr3zMJhoUYb8jX-Raf1WST-o90?usp=sharing)|II|Partial Mouth|[link](https://drive.google.com/drive/folders/1Z-LcrLNv5g7NrgzuF4ba2g80mEpa14p0?usp=share_link)|II|Mannequin|[link](https://drive.google.com/drive/folders/1Lv3byEmeWtgJi23A5_6SC2mkhhLs8VHe?usp=sharing)|
|III|Cross Domain|[link](https://drive.google.com/drive/folders/1Nv2BePpjQgo2YD_CqxQ1Sv99UJn7esPB?usp=sharing)|
## 5. Download
1. SiW-Mv2 database is available under a license from Michigan State University for research purposes. Sign the Dataset Release Agreement [link](https://github.com/CHELSEA234/Multi-domain-learning-FAS/blob/main/DRA_form_SIWMv2.pdf).
2. Submit the request and your signed DRA to `guoxia11@msu.edu` with the following information:
- Title: SiW-Mv2 Application
- CC: Your advisor's email
- Content Line 1: Your name, email, affiliation
- Content Line 2: Your advisor's name, email, webpage
- Attachment: Signed DRA
3. You will receive the download instructions upon approval of your usage of the database.
## Reference
If you would like to use our work, please cite:
```Bibtex
@inproceedings{xiaoguo2022MDFAS,
title={Multi-domain Learning for Updating Face Anti-spoofing Models},
author={Guo, Xiao and Liu, Yaojie and Jain, Anil and Liu, Xiaoming},
booktitle={ECCV},
year={2022}
}
```
This github will continue to update in the near future. If you have any question, please contact: [Xiao Guo](guoxia11@msu.edu)
| 8,062 | 82.123711 | 685 | md |
null | Multi-domain-learning-FAS-main/source_multi_domain/utils.py | from skimage.draw import line_aa
import cv2
import tensorflow as tf
import sys
import glob
import random
import numpy as np
import math as m
import tensorflow.keras.layers as layers
import matplotlib.tri as mtri
from scipy import ndimage, misc
from PIL import Image, ImageDraw
class Logging(object):
    """Training/validation logger: accumulates per-loss running sums, prints
    progress lines, and periodically dumps figure grids and a text log under
    ``config.CHECKPOINT_DIR``."""

    def __init__(self, config):
        self.config = config
        self.losses = {}       # train: name -> [summed value, count]
        self.losses_val = {}   # val:   name -> [summed value, count]
        self.txt = ''          # last progress line (flushed to log.txt by save())
        self.fig = []
        self.fig_val = []

    def update(self, losses, training):
        """Accumulate one step's loss dict into the running sums.

        Validation losses are tensors and are converted via ``.numpy()``;
        training losses are assumed to already be plain numbers.
        """
        if training:
            for name in losses.keys():
                if name in self.losses:
                    current_loss = self.losses[name]
                    self.losses[name] = [current_loss[0]+losses[name], current_loss[1]+1]
                else:
                    self.losses[name] = [losses[name], 1]
        else:
            for name in losses.keys():
                if name in self.losses_val:
                    current_loss = self.losses_val[name]
                    self.losses_val[name] = [current_loss[0]+losses[name].numpy(), current_loss[1]+1]
                else:
                    self.losses_val[name] = [losses[name].numpy(), 1]

    def display(self, losses, epoch, step, training, allstep):
        """Accumulate ``losses`` and print a one-line progress summary
        (carriage-return overwrite); also records epoch/step for save()."""
        self.update(losses, training)
        if training:
            text = 'Epoch (Train) '+str(epoch+1)+'-'+str(step+1)+'/'+str(allstep) + ': '
            for _name in self.losses.keys():
                value = self.losses[_name]
                text += _name+':'+"{:.3g}".format(value[0]/value[1])+', '
        else:
            text = 'Epoch ( Val ) '+str(epoch+1)+'-'+str(step+1)+'/'+str(allstep) + ': '
            for _name in self.losses_val.keys():
                value = self.losses_val[_name]
                text += _name+':'+"{:.3g}".format(value[0]/value[1])+', '
        # drop the trailing ", " and pad so shorter lines overwrite longer ones
        text = text[:-2]+' '
        print(text, end='\r')
        self.txt = text
        self.epoch = epoch
        self.step = step

    def display_metric(self, message):
        """Print ``message`` and append it to CHECKPOINT_DIR/log.txt."""
        config = self.config
        print(message, end='\r')
        file_object = open(config.CHECKPOINT_DIR+'/log.txt', 'a')
        file_object.write(message+'\n')
        file_object.close()

    def save(self, fig, training):
        """Periodically write a stacked figure PNG and flush the last
        progress line to log.txt; validation uses 10x higher frequency.
        Must be called after display() (relies on self.step/epoch/txt)."""
        config = self.config
        step = self.step
        if training:
            if step % config.IMG_LOG_FR == 0:
                fig = self.get_figures(fig)
                fname = config.CHECKPOINT_DIR + '/epoch-' + str(self.epoch+1) + '-Train-' + str(self.step+1) + '.png'
                cv2.imwrite(fname, fig.numpy())
            if step % config.TXT_LOG_FR == 0:
                file_object = open(config.CHECKPOINT_DIR+'/log.txt', 'a')
                file_object.write(self.txt+'\n')
                file_object.close()
        else:
            if step % (config.IMG_LOG_FR//10) == 0:
                fig = self.get_figures(fig)
                fname = config.CHECKPOINT_DIR + '/epoch-' + str(self.epoch+1) + '-Val-' + str(self.step+1) + '.png'
                cv2.imwrite(fname, fig.numpy())
            if step % (config.TXT_LOG_FR//10) == 0:
                file_object = open(config.CHECKPOINT_DIR+'/log.txt', 'a')
                file_object.write(self.txt+'\n')
                file_object.close()
        self.fig = []
        self.fig_val = []

    def save_img(self, fig, fname):
        """Write a single test-time result image to CHECKPOINT_DIR/test/,
        named after the input file's basename with a '-result' suffix."""
        config = self.config
        step = self.step
        fig = self.get_imgs(fig,256)
        fname = config.CHECKPOINT_DIR+'/test/'+fname.split('/')[-1].split('.')[0]+'-result.png'
        cv2.imwrite(fname, fig.numpy())
        self.fig = []
        self.fig_val = []

    def reset(self):
        """Zero the per-loss accumulators.

        NOTE(review): relies on ``self.loss_names``, which is never assigned
        in __init__ -- confirm callers set it before invoking reset().
        ``losses``/``losses_val``/``ind`` locals here are effectively unused.
        """
        losses = {}
        losses_val = {}
        ind = 0
        for _name in self.loss_names:
            self.losses[_name] = [0, 0]
            self.losses_val[_name] = [0, 0]
            ind += 1
        self.txt = ''
        self.img = 0

    def get_imgs(self, fig, size=None):
        """Stack the first sample of each NHWC tensor in ``fig`` vertically.

        Single-channel maps are replicated to 3 channels; RGB inputs are
        swapped to BGR for cv2.imwrite. NOTE: when ``size`` is given the
        IMG_SIZE branch is used, otherwise FIG_SIZE.
        """
        config = self.config
        column = []
        for _img in fig:
            _img = tf.clip_by_value(_img, 0.0, 1.0)*255
            if _img.shape[3] == 1:
                _img = tf.concat([_img, _img, _img], axis=3)
            else:
                r, g, b = tf.split(_img[:,:,:,:3], 3, 3)
                _img = tf.concat([b,g,r], 3)
            if size is None:
                _img = tf.image.resize(_img, [config.FIG_SIZE, config.FIG_SIZE])
            else:
                _img = tf.image.resize(_img, [config.IMG_SIZE, config.IMG_SIZE])
            column.append(_img[0,:,:,:])
        column = tf.concat(column, axis=0)
        return column

    def get_figures(self, fig, size=None):
        """Like get_imgs(), but lays out the whole batch of each tensor as a
        row (samples side by side) and stacks the rows vertically."""
        config = self.config
        column = []
        for _img in fig:
            _img = tf.clip_by_value(_img, 0.0, 1.0)*255
            if _img.shape[3] == 1:
                _img = tf.concat([_img, _img, _img], axis=3)
            else:
                r, g, b = tf.split(_img[:,:,:,:3], 3, 3)
                _img = tf.concat([b,g,r], 3)
            if size is None:
                _img = tf.image.resize(_img, [config.FIG_SIZE, config.FIG_SIZE])
            else:
                _img = tf.image.resize(_img, [config.IMG_SIZE, config.IMG_SIZE])
            _row = tf.split(_img, _img.shape[0])
            _row = tf.concat(_row, axis=2)
            column.append(_row[0,:,:,:])
        column = tf.concat(column, axis=0)
        return column
def l1_loss(x, y, mask=None):
    """Mean absolute error between ``x`` and ``y``.

    When ``mask`` is given, the error is summed over masked-in positions and
    normalized by the mask sum (epsilon-guarded) and the channel count
    ``x.shape[3]``; otherwise it is the plain mean over all elements.

    Fix: removed the unused local ``xshape``.
    """
    if mask is not None:
        return tf.math.reduce_sum(tf.abs(x-y) * mask) / (tf.reduce_sum(mask) + 1e-6) / x.shape[3]
    return tf.math.reduce_mean(tf.abs(x-y))
def l2_loss(x, y, mask=None):
    """Mean squared error between ``x`` and ``y``.

    When ``mask`` is given, the squared error is summed over masked-in
    positions and normalized by the mask sum (epsilon-guarded) and the
    channel count ``x.shape[3]``; otherwise it is the plain mean.

    Fix: removed the unused local ``xshape``.
    """
    if mask is not None:
        return tf.math.reduce_sum(tf.square(tf.subtract(x, y)) * mask) / (tf.reduce_sum(mask) + 1e-6) / x.shape[3]
    return tf.math.reduce_mean(tf.square(tf.subtract(x, y)))
def hinge_loss(y_pred, y_true, mask=None):
    # Standard hinge loss: mean(max(0, 1 - y_true * y_pred)).
    # NOTE(review): ``mask`` is accepted but currently unused -- confirm
    # whether masked averaging was intended here.
    return tf.math.reduce_mean(tf.math.maximum(0., 1. - y_true*y_pred))
def generate_face_region(source, img_size):
    """Rasterize a soft face-region mask of shape (img_size, img_size, 1).

    ``source`` holds 2-D landmark coordinates (presumably the 68-point
    convention, with points 0-16 being the jawline -- TODO confirm). Extra
    points mirrored above the jawline extend the region over the forehead;
    the landmark hull is then triangulated, filled, binarized and blurred.
    """
    # Mirror the jawline (points 0..16) upward to approximate the forehead.
    morelm = np.copy(source[0:17,:])
    morelm[:,1] = morelm[0,1] - (morelm[:,1] - morelm[0,1]) * 0.8
    source = np.concatenate([source,morelm],axis=0)
    # Dead alternative implementation (PIL polygon fill), kept for reference:
    '''
    img = Image.new('L', (img_size, img_size), 0)
    ImageDraw.Draw(img).polygon(source, outline=1, fill=1)
    mask = np.array(img)
    mask = cv2.GaussianBlur(mask,(5,5),0).reshape([img_size,img_size,1])
    '''
    xi, yi = np.meshgrid(np.linspace(0, 1, img_size), np.linspace(0, 1, img_size))
    # interp2d
    # Triangulate the landmarks and interpolate; pixels outside the hull
    # come back NaN -> 0, inside > 0, giving a binary inside/outside map.
    _triang = mtri.Triangulation(source[:,0], source[:,1])
    _interpx = mtri.LinearTriInterpolator(_triang, source[:,0])
    _offsetmapx = _interpx(xi, yi)
    offsetmap = np.stack([_offsetmapx], axis=2)
    offsetmap = np.nan_to_num(offsetmap)
    offsetmap = np.asarray(offsetmap>0,np.float32)
    # Soften the hard boundary.
    offsetmap = cv2.GaussianBlur(offsetmap,(5,5),0).reshape([img_size,img_size,1])
    return offsetmap
def generate_landmark_map(landmark, img_size):
    """Render 68-point landmarks as a blurred line-drawing heatmap.

    ``landmark`` is expected in normalized [0, 1] coordinates (scaled by
    img_size below -- TODO confirm with callers). Returns an array of shape
    (img_size, img_size, 1) normalized to a max of 255.
    """
    # Landmark index pairs (1-based, 68-point convention) to connect:
    # jawline, brows, eyes, nose, outer and inner lips.
    lmlist = [[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[10,11],[11,12],[12,13],[13,14],[14,15],[15,16],[16,17],
              [18,19],[19,20],[20,21],[21,22],[23,24],[24,25],[25,26],[26,27],
              [37,38],[38,39],[39,40],[40,41],[41,42],[42,37],[43,44],[44,45],[45,46],[46,47],[47,48],[48,43],
              [28,29],[29,30],[30,31],[32,33],[33,34],[34,35],[35,36],
              [49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,60],[60,49],
              [61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,68],[68,61]]
    lm_map = []
    img = np.zeros((img_size, img_size), dtype=np.uint8)
    lm = landmark*img_size
    for pr in lmlist:
        lm_start = lm[pr[0]-1,:].astype(np.int32)
        lm_end = lm[pr[1]-1,:].astype(np.int32)
        # Anti-aliased line between the two points.
        rr, cc, val = line_aa(lm_start[0], lm_start[1], lm_end[0], lm_end[1])
        # Drop pixels that fall outside the image in either axis.
        templist = [t for t in range(len(rr)) if rr[t] < img_size and rr[t] > 0 ]
        rr = rr[templist]
        cc = cc[templist]
        val = val[templist]
        templist = [t for t in range(len(cc)) if cc[t] < img_size and cc[t] > 0 ]
        rr = rr[templist]
        cc = cc[templist]
        val = val[templist]
        img[cc, rr] = val * 255
    # Blur and renormalize so the brightest pixel is 255.
    blur = cv2.GaussianBlur(img,(3,3),0)
    blur = blur / np.amax(blur) * 255
    lm_map = np.reshape(blur, [blur.shape[0], blur.shape[1], 1])
    return lm_map
def face_crop_and_resize(img, lm, fsize, box_perturb=[1.15, 1.25], aug=False):
    """Crop the face region defined by landmarks ``lm`` and resize to
    (fsize, fsize); returns (cropped_img, landmarks normalized to the crop).

    With ``aug=True``, randomly mirrors the image (remapping the 68-point
    landmark order) and jitters the crop center/size.
    NOTE(review): ``box_perturb`` and the locals ``box_m``/``sz`` are unused,
    and ``lm`` is modified in place -- confirm callers don't reuse it.
    """
    ## visualize this function.
    img_shape = img.shape
    # Index permutation that maps 68-point landmarks to their horizontal
    # mirror (0-based after the trailing -1).
    lm_reverse_list = np.array([17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,
                                27,26,25,24,23,22,21,20,19,18,
                                28,29,30,31,36,35,34,33,32,
                                46,45,44,43,48,47,40,39,38,37,42,41,
                                55,54,53,52,51,50,49,60,59,58,57,56,65,64,63,62,61,68,67,66],np.int32) -1
    # Random horizontal flip (augmentation only, 68-point landmarks only).
    if aug and lm.shape[0] == 68 and random.uniform(0,1)>0.5:
        img = cv2.flip(img, 1) # horizontal reverse.
        lm[:,0] = img_shape[1] - lm[:,0]
        lm = lm[lm_reverse_list,:]

    # center and length of the landmarks.
    center = [(np.min(lm[:,0])+np.max(lm[:,0]))/2, (np.min(lm[:,1])+np.max(lm[:,1]))/2]
    length = np.max([(np.max(lm[:,0])-np.min(lm[:,0]))/2, (np.max(lm[:,1])-np.min(lm[:,1]))/2]) * 1.1
    if aug: # if aug, change the center and length.
        center[0] = center[0] + random.uniform(-0.05,0.05)*length
        center[1] = center[1] + random.uniform(-0.05,0.05)*length
        length = length * random.uniform(0.93,1.07)

    ## cropping the image.
    # Square box, shifted upward (1.2x length above center) to include more
    # forehead than chin.
    box = [int(center[0])-int(length),
           int(center[1])-int(length*1.2),
           int(center[0])+int(length),
           int(center[1])+int(length)+int(length)-int(length*1.2)]
    box_m = [img_shape[1] - box[2],
             box[1],
             img_shape[1] - box[0],
             box[3]]
    # Landmarks become relative to the crop origin.
    lm[:,0] = lm[:,0] - box[0]
    lm[:,1] = lm[:,1] - box[1]

    # If the box leaves the image, paste the image into a zero-padded canvas
    # and shift the box accordingly.
    preset_x = 0
    preset_y = 0
    if box[0] < 0 or box[2] > img_shape[1]:
        preset_x = max(-box[0], box[2] - img_shape[1])
    if box[1] < 0 or box[3] > img_shape[0]:
        preset_y = max(-box[1], box[3] - img_shape[0])
    if preset_x > 0 or preset_y > 0:
        img_large= np.zeros((img_shape[0]+preset_y+preset_y+2,img_shape[1]+preset_x+preset_x+2,img_shape[2]))
        img_large[preset_y:preset_y+int(img_shape[0]),preset_x:preset_x+int(img_shape[1]),:] = img
        img = img_large
        box[0] = box[0] + preset_x
        box[1] = box[1] + preset_y
        box[2] = box[2] + preset_x
        box[3] = box[3] + preset_y
    img = img[box[1]:box[3],box[0]:box[2],:]
    sz = img.shape[0]
    # Guard against degenerate/non-square crops: fall back to a black image.
    if img.shape[0] == img.shape[1] and img.shape[0]>0:
        img = cv2.resize(img, (fsize,fsize))
    else:
        img = np.zeros((fsize, fsize, img.shape[2]))
    return img, lm/(length*2)
def file_reader(filename):
    """Read ``filename`` and return its contents as a list of lines.

    Splits on '\\n', so a trailing newline yields a final empty string.
    """
    with open(filename, 'r') as handle:
        contents = handle.read()
    return contents.split('\n')
| 10,107 | 35.359712 | 123 | py |
null | Multi-domain-learning-FAS-main/source_multi_domain/model.py | import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers
from warp import tf_batch_map_offsets
class Conv(layers.Layer):
    """Conv2D wrapper: optional batch/spectral norm, LeakyReLU, dropout.

    ``norm`` is 'batch' (BatchNorm), 'spec' (spectral norm on the conv), or
    anything falsy for no normalization; ``nl`` toggles the LeakyReLU and
    ``dropout`` adds Dropout(0.3).
    """
    def __init__(self, ch=32, ksize=3, stride=1, norm='batch', nl=True, dropout=False, name=None):
        super(Conv, self).__init__()
        self.norm = norm
        self.conv = layers.Conv2D(ch, (ksize, ksize), strides=(stride, stride), padding='same',name=name)
        if norm == 'batch':
            # conv + bn
            self.bnorm = layers.BatchNormalization()
        else:
            self.bnorm = None
        if norm == 'spec':
            # conv + sn
            self.conv = tfa.layers.SpectralNormalization(self.conv)
        # relu
        if nl:
            self.relu = layers.LeakyReLU()
        else:
            self.relu = None
        # dropout
        if dropout:
            self.drop = layers.Dropout(0.3)
        else:
            self.drop = None

    def call(self, x, training):
        # Order: conv -> (bn) -> (leaky relu) -> (dropout).
        x = self.conv(x)
        if self.bnorm:
            x = self.bnorm(x, training)
        if self.relu:
            x = self.relu(x)
        if self.drop:
            x = self.drop(x)
        return x
class ConvT(layers.Layer):
    """Conv2DTranspose (upsampling) wrapper; same option set as Conv:
    optional batch/spectral norm, LeakyReLU, Dropout(0.3)."""
    def __init__(self, ch=32, ksize=3, stride=2, norm='batch', nl=True, dropout=False):
        super(ConvT, self).__init__()
        self.norm = norm
        self.conv = layers.Conv2DTranspose(ch, (ksize, ksize), strides=(stride, stride), padding='same')
        if norm == 'batch': # conv + bn
            self.bnorm = layers.BatchNormalization()
        else:
            self.bnorm = None
        if norm == 'spec': # conv + sn
            self.conv = tfa.layers.SpectralNormalization(self.conv)
        if nl: # relu
            self.relu = layers.LeakyReLU()
        else:
            self.relu = None
        if dropout: # dropout
            self.drop = layers.Dropout(0.3)
        else:
            self.drop = None

    def call(self, x, training):
        # Order: transposed conv -> (bn) -> (leaky relu) -> (dropout).
        x = self.conv(x)
        if self.bnorm:
            x = self.bnorm(x, training)
        if self.relu:
            x = self.relu(x)
        if self.drop:
            x = self.drop(x)
        return x
class SA(layers.Layer):
    """Spatial attention: gate the input by a sigmoid map predicted from the
    channel-wise mean and max of the features."""

    def __init__(self, ksize=3):
        super(SA, self).__init__()
        # Single-channel conv turning the 2-channel [mean, max] summary into
        # an attention logit map.
        self.conv1 = Conv(1, ksize=ksize, name='conv')

    def call(self, x, training):
        channel_mean = tf.reduce_mean(x, axis=3, keepdims=True)
        channel_max = tf.reduce_max(x, axis=3, keepdims=True)
        summary = tf.concat([channel_mean, channel_max], axis=3)
        attention_logits = self.conv1(summary, training)
        return x * tf.sigmoid(attention_logits)
class region_estimator(tf.keras.Model):
    """Decoder that upsamples a 4-level feature pyramid into a single-channel
    sigmoid region map (spoof-region estimate)."""
    def __init__(self):
        super(region_estimator, self).__init__()
        self.up1 = ConvT(64)
        self.up2 = ConvT(40)
        self.up3 = ConvT(40)
        self.up4 = ConvT(40)
        self.conv_map1 = Conv(1, ksize=7, norm=False, nl=False)

    def call(self, feature, training):
        # feature = [finest ... coarsest]; decode coarse-to-fine with skip
        # concatenations at each level.
        # NOTE(review): up1 and conv_map1 are called without the explicit
        # training flag, unlike up2-up4 -- confirm this is intended.
        x = self.up1(feature[3])
        x = self.up2(tf.concat([x, feature[2]], axis=3), training)
        x = self.up3(tf.concat([x, feature[1]], axis=3), training)
        x = self.up4(tf.concat([x, feature[0]], axis=3), training)
        output_f = self.conv_map1(x)
        x = tf.nn.sigmoid(output_f)
        return x
class Generator(tf.keras.Model):
    """Spoof-trace generator/disentanglement network.

    Encodes a Laplacian-style multi-scale decomposition of the input through
    a 4-stage dense encoder, modulates features with a region map from the
    injected region estimator ``Region_E``, and decodes three outputs plus a
    depth/score map:
      p -- sigmoid region mask (1ch), n -- tanh additive trace (3ch),
      c -- sigmoid content image (3ch), dmap -- 2-channel map from the
      attention shortcut branch.
    """
    def __init__(self, Region_E=None):
        super(Generator, self).__init__()
        self.RE = Region_E
        n_ch = [32,40,64,96,128,192,256]
        self.n_ch = n_ch
        # Dense encoder blocks (conv1..conv8) with downsampling convs.
        self.conv0 = Conv(n_ch[0], ksize=7, name='conv0')
        self.conv1 = Conv(n_ch[1], name='conv1')
        self.conv2 = Conv(n_ch[1], name='conv2')
        self.conv3 = Conv(n_ch[1], name='conv3')
        self.conv4 = Conv(n_ch[1], name='conv4')
        self.conv5 = Conv(n_ch[1], name='conv5')
        self.conv6 = Conv(n_ch[1], name='conv6')
        self.conv7 = Conv(n_ch[1], name='conv7')
        self.conv8 = Conv(n_ch[1], name='conv8')
        # Output heads: conv9 -> p, conv10 -> n, conv13 -> c,
        # conv11/conv12 -> dmap from the shortcut branch.
        self.conv9 = Conv(1, ksize=7, norm=False, nl=False, name='conv9')
        self.conv10 = Conv(3, ksize=7, norm=False, nl=False, name='conv10')
        self.conv11 = Conv(n_ch[4], name='conv11')
        self.conv12 = Conv(2, ksize=7, norm=False, nl=False, name='conv12')
        self.conv13 = Conv(3, ksize=7, norm=False, nl=False, name='conv13')
        # Three decoder stacks (u: up1-4, w: up5-8, v: up9-12).
        self.up1 = ConvT(n_ch[2])
        self.up2 = ConvT(n_ch[1])
        self.up3 = ConvT(n_ch[1])
        self.up4 = ConvT(n_ch[1])
        self.up5 = ConvT(n_ch[2])
        self.up6 = ConvT(n_ch[1])
        self.up7 = ConvT(n_ch[1])
        self.up8 = ConvT(n_ch[1])
        self.up9 = ConvT(n_ch[2])
        self.up10 = ConvT(n_ch[1])
        self.up11 = ConvT(n_ch[1])
        self.up12 = ConvT(n_ch[1])
        self.down1 = Conv(n_ch[4], stride=2)
        self.down2 = Conv(n_ch[4], stride=2)
        self.down3 = Conv(n_ch[4], stride=2)
        self.down4 = Conv(n_ch[4], stride=2)
        # Spatial-attention modules for the shortcut/dmap branch.
        self.sa1 = SA(7)
        self.sa2 = SA(5)
        self.sa3 = SA(3)
        self.sa4 = SA(3)
        self.sa5 = SA(7)
        self.sa6 = SA(7)
        self.pool2 = layers.AveragePooling2D(pool_size=(2, 2), strides=2)
        self.pool4 = layers.AveragePooling2D(pool_size=(4, 4), strides=4)
        self.pool8 = layers.AveragePooling2D(pool_size=(8, 8), strides=8)

    def call(self, img, training):
        ## GX: image with different scales.
        im_128 = tf.image.resize(self.pool2(img), [256, 256])
        im_64 = tf.image.resize(self.pool4(img), [256, 256])
        im_32 = tf.image.resize(self.pool8(img), [256, 256])
        ## GX: create some residual images.
        # Band-pass residuals between adjacent scales, amplified so each
        # band has comparable magnitude; imgd4 is the low-frequency base.
        imgd1 = img - im_128
        imgd2 = im_128 - im_64
        imgd3 = im_64 - im_32
        imgd4 = im_32
        inputs = tf.concat([imgd1*25, imgd2*15, imgd3*8, imgd4], axis=3)

        # Dense encoder: each stage concatenates its inputs with two conv
        # outputs, then downsamples (x*_3 are the per-stage outputs).
        x0 = self.conv0(inputs, training)
        x1_1 = tf.concat([x0, self.conv1(x0, training)], axis=3)
        x1_2 = tf.concat([x0, x1_1, self.conv2(x1_1, training)],axis=3)
        x1_3 = self.down1(x1_2, training)

        x2_1 = tf.concat([x1_3, self.conv3(x1_3, training)], axis=3)
        x2_2 = tf.concat([x1_3, x2_1, self.conv4(x2_1, training)],axis=3)
        x2_3 = self.down2(x2_2, training)

        x3_1 = tf.concat([x2_3, self.conv5(x2_3, training)], axis=3)
        x3_2 = tf.concat([x2_3, x3_1, self.conv6(x3_1, training)],axis=3)
        x3_3 = self.down3(x3_2, training)

        x4_1 = tf.concat([x3_3, self.conv7(x3_3, training)], axis=3)
        x4_2 = tf.concat([x3_3, x4_1, self.conv8(x4_1, training)],axis=3)
        x4_3 = self.down4(x4_2, training)

        # Region-map modulation: very small (1e-5) residual gating of each
        # pyramid level by the estimated region map.
        region_map = self.RE([x1_3,x2_3,x3_3,x4_3], training=training)
        x1_3 = 1e-5*x1_3*tf.image.resize(region_map, [128, 128]) + x1_3
        x2_3 = 1e-5*x2_3*tf.image.resize(region_map, [64, 64])  + x2_3
        x3_3 = 1e-5*x3_3*tf.image.resize(region_map, [32, 32])  + x3_3
        x4_3 = 1e-5*x4_3*tf.image.resize(region_map, [16, 16])  + x4_3

        # u, w, v are for the p, n, c respectively.
        u1 = self.up1(x4_3, training)
        u2 = self.up2(tf.concat([u1, x3_3], axis=3), training)
        u3 = self.up3(tf.concat([u2, x2_3], axis=3), training)
        u4 = self.up4(tf.concat([u3, x1_3], axis=3), training)

        w1 = self.up5(x4_3, training)
        w2 = self.up6(tf.concat([w1, x3_3], axis=3), training)
        w3 = self.up7(tf.concat([w2, x2_3], axis=3), training)
        w4 = self.up8(tf.concat([w3, x1_3], axis=3), training)

        # v-branch decodes without skip connections.
        v1 = self.up9(x4_3, training)
        v2 = self.up10(v1, training)
        v3 = self.up11(v2, training)
        v4 = self.up12(v3, training)

        p = tf.nn.sigmoid(self.conv9(u4, training)) # region
        n = tf.nn.tanh(self.conv10(w4, training)/3e2) # additive trace.
        c = tf.nn.sigmoid(self.conv13(v4, training)) # content

        # ShortCut
        # Attention-gated multi-level features, all resized to 32x32; the
        # decoder features u4/w4 enter with stopped gradients.
        d1 = tf.image.resize(self.sa1(x1_3),[32,32])
        d2 = tf.image.resize(self.sa2(x2_3),[32,32])
        d3 = tf.image.resize(self.sa3(x3_3),[32,32])
        d4 = tf.image.resize(self.sa4(x4_3),[32,32])
        d5 = tf.image.resize(self.sa5(tf.stop_gradient(u4)),[32,32])
        d6 = tf.image.resize(self.sa6(tf.stop_gradient(w4)),[32,32])
        ds = tf.concat([d1, d2, d3, d4, d5, d6],3)
        x4 = self.conv11(ds, training)
        dmap = self.conv12(x4, training)

        return dmap, p, c, n, [x1_3,x2_3,x3_3,x4_3], region_map
class Discriminator(tf.keras.Model):
	"""PatchGAN-style discriminator operating at an optionally downsized scale.

	`downsize` divides the input spatial resolution before the conv stack;
	`num_layers` controls how many stride-2 conv blocks follow the stem.
	`call` returns the logits split in half along the batch axis (the input
	batch is assumed to stack real images first, synthesized images second).
	"""
	def __init__(self, downsize=1, num_layers=3):
		super(Discriminator, self).__init__()
		channels = [32, 64, 96, 128, 128, 256]
		# Stem: no normalization on the first conv, as is conventional for GAN critics.
		self.conv1 = Conv(channels[0], ksize=4, stride=2, norm=False)
		self.conv_stack = [Conv(channels[i], ksize=4, stride=2, norm='batch')
						   for i in range(num_layers)]
		# Head: plain conv, no norm, no non-linearity (raw logits).
		self.conv2 = Conv(channels[2], ksize=4, norm=False, nl=False)
		self.downsize = downsize
		self.num_layers = num_layers
	def call(self, x, training):
		if self.downsize > 1:
			_, width, height, _ = x.shape
			x = tf.image.resize(x, (width // self.downsize, height // self.downsize))
		x = self.conv1(x, training)
		for block in self.conv_stack:
			x = block(x, training)
		x = self.conv2(x)
		# First half of the batch = real branch, second half = synthesized branch.
		return tf.split(x, 2, axis=0)
null | Multi-domain-learning-FAS-main/source_multi_domain/dataset.py | # Copyright 2022
#
# Authors: Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu.
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
import cv2
import tensorflow as tf
import glob
import random
import numpy as np
from natsort import natsorted, ns
from utils import face_crop_and_resize
from warp import generate_uv_map
from parameters import uv, lm_ref, RANDOM_SEED, REPEAT_TIME_LI, REPEAT_TIME_SP, SAMPLE_NUM_TRAIN, SAMPLE_NUM_TEST
autotune = tf.data.experimental.AUTOTUNE
autotune = -1
uv = np.transpose(np.asarray(uv, dtype=np.float32))
lm_ref = np.transpose(np.asarray(lm_ref, dtype=np.float32))/256.
def get_dmap_and_stype(config, lm, dataset, stype):
	"""Build the 2-channel depth-map label and 15-way one-hot spoof label.

	Channel 0 of `dmap` is the "live" depth region, channel 1 the "spoof"
	region. Full-face attacks put the whole UV depth map in the spoof
	channel; partial attacks split the face at mid-height; flat attacks
	(Replay/Paper) use an all-ones spoof channel.
	"""
	dmap0 = generate_uv_map(lm, uv, config.IMG_SIZE)
	half = config.IMG_SIZE // 2
	dmap_up = np.copy(dmap0)
	dmap_up[half:, :, :] = 0		# keep only the upper face
	dmap_bot = np.copy(dmap0)
	dmap_bot[:half, :, :] = 0		# keep only the lower face
	# spoof type -> (one-hot index, depth-map layout)
	spec = {
		'Live':               (0,  'live'),
		'Makeup_Co':          (1,  'full'),
		'Makeup_Im':          (2,  'full'),
		'Makeup_Ob':          (3,  'full'),
		'Mask_Half':          (4,  'full'),
		'Mask_Silicone':      (5,  'full'),
		'Mask_Trans':         (6,  'full'),
		'Mask_Paper':         (7,  'full'),
		'Mask_Mann':          (8,  'full'),
		'Partial_Funnyeye':   (9,  'upper'),
		'Partial_Eye':        (10, 'upper'),
		'Partial_Mouth':      (11, 'lower'),
		'Partial_Paperglass': (12, 'upper'),
		'Replay':             (13, 'flat'),
		'Paper':              (14, 'flat'),
	}
	if stype not in spec:
		assert False, print(f"{stype} is invalid....")
	idx, layout = spec[stype]
	n_stype = [0] * 15
	n_stype[idx] = 1
	if layout == 'live':
		dmap = np.concatenate([dmap0, dmap0*0], axis=2)
	elif layout == 'full':
		dmap = np.concatenate([np.zeros_like(dmap0), dmap0], axis=2)
	elif layout == 'upper':
		# Spoof occupies the upper face; lower face is treated as live.
		dmap = np.concatenate([dmap_bot, dmap_up], axis=2)
	elif layout == 'lower':
		# Spoof occupies the lower face (e.g. mouth region).
		dmap = np.concatenate([dmap_up, dmap_bot], axis=2)
	else:  # 'flat' — presentation attack covers the entire frame
		dmap = np.concatenate([np.zeros_like(dmap0), np.ones_like(dmap0)], axis=2)
	return dmap, n_stype
class Dataset():
	"""tf.data pipeline pairing live and spoof face frames.

	For train/val modes it yields paired (live, spoof) samples with
	depth-map labels and 15-way spoof-type one-hots; for test modes it
	yields single frames with their file names. Frames are `.png` files
	with a same-basename `.npy` 68-point landmark file next to each.
	"""
	def __init__(self, config, mode, dset=None):
		# `mode` selects which directory lists from the config are used:
		# 'train', 'val', or any string containing 'test_A' / 'test_B'.
		self.config = config
		self.mode = mode
		self.dset = dset
		if mode == 'train':
			data_dir_li = self.config.LI_DATA_DIR
			data_dir_sp = self.config.SP_DATA_DIR
		elif mode == 'val':
			data_dir_li = self.config.LI_DATA_DIR_VAL
			data_dir_sp = self.config.SP_DATA_DIR_VAL
		elif 'test_A' in mode:
			data_dir_li = self.config.LI_DATA_DIR_TEST
			data_dir_sp = self.config.SP_DATA_DIR_TEST
		elif 'test_B' in mode:
			data_dir_li = self.config.LI_DATA_DIR_TEST_B
			data_dir_sp = self.config.SP_DATA_DIR_TEST_B
		self.data_folders = None
		self.data_samples = None
		self.input_tensors, self.name_list = self.inputs(data_dir_li, data_dir_sp)
		self.feed = iter(self.input_tensors)
	def __len__(self):
		# Number of underlying sample entries (not batches).
		return len(self.name_list)
	def _info(self):
		return len(self.data_samples)
	def nextit(self):
		"""Return the next already-batched element from the infinite feed."""
		return next(self.feed)
	def _return_list(self, dir):
		"""Expand a list of glob patterns into a flat list of matching paths."""
		dir_list = []
		for _ in dir:
			_list = glob.glob(_)
			dir_list += _list
		return dir_list
	def _extend_list(self, vd_list):
		"""Deterministically sample 20 frame paths from each video folder."""
		new_list = []
		for idx, _file in enumerate(vd_list):
			meta = glob.glob(_file+'/*.png')
			meta.sort()
			# Fixed seed => the same 20 frames are picked on every run.
			# NOTE(review): random.sample raises if a folder has < 20 frames — confirm.
			random.seed(RANDOM_SEED)
			meta = random.sample(meta, 20)
			new_list += meta
		return new_list
	def inputs(self, data_dir_li, data_dir_sp):
		"""Build the tf.data.Dataset for the current mode.

		Returns (dataset, data_samples): the batched & prefetched pipeline,
		and the backing list of sample paths (folder pairs for train/val,
		frame paths for test).
		"""
		mode = self.mode
		protocol = self.config.SET
		if mode == 'train' or mode == 'val':
			# Oulu configs already hold explicit sample lists; other datasets hold globs.
			li_data_samples = data_dir_li if self.config.dataset == 'oulu' else self._return_list(data_dir_li)
			sp_data_samples = data_dir_sp if self.config.dataset == 'oulu' else self._return_list(data_dir_sp)
			data_samples = [li_data_samples, sp_data_samples]
			# Repeat each side so both lists are long enough to truncate
			# to a common sample count below.
			li_data_samples = REPEAT_TIME_LI * li_data_samples
			sp_data_samples = REPEAT_TIME_SP * sp_data_samples
			li_data_samples = li_data_samples[:SAMPLE_NUM_TRAIN] if mode == 'train' else li_data_samples[:SAMPLE_NUM_TEST]
			sp_data_samples = sp_data_samples[:SAMPLE_NUM_TRAIN] if mode == 'train' else sp_data_samples[:SAMPLE_NUM_TEST]
			shuffle_buffer_size = min(len(li_data_samples), len(sp_data_samples))
			dataset = tf.data.Dataset.from_tensor_slices((li_data_samples, sp_data_samples))
			# repeat(-1): infinite stream; the trainer pulls a fixed number of steps.
			dataset = dataset.shuffle(shuffle_buffer_size).repeat(-1)
			if mode == 'train':
				dataset = dataset.map(map_func=self.parse_fn, num_parallel_calls=autotune)
			elif mode == 'val':
				dataset = dataset.map(map_func=self.parse_fn_val, num_parallel_calls=autotune)
			dataset = dataset.batch(batch_size=self.config.BATCH_SIZE).prefetch(buffer_size=autotune)
		elif 'test_A' in mode or 'test_B' in mode:
			li_data_samples = self._return_list(data_dir_li)
			sp_data_samples = self._return_list(data_dir_sp)
			if 'csv' not in mode:
				# Cap each side at 50 folders, sampled deterministically.
				random.seed(RANDOM_SEED)
				li_data_samples = random.sample(li_data_samples, 50) if len(li_data_samples) >= 50 else \
								  random.sample(li_data_samples, len(li_data_samples))
				random.seed(RANDOM_SEED)
				sp_data_samples = random.sample(sp_data_samples, 50) if len(sp_data_samples) >= 50 else \
								  random.sample(sp_data_samples, len(sp_data_samples))
			self.data_folders = li_data_samples + sp_data_samples
			data_samples = self._extend_list(self.data_folders)
			dataset = tf.data.Dataset.from_tensor_slices(data_samples)
			dataset = dataset.cache()
			dataset = dataset.map(map_func=self.parse_fn_test)
			dataset = dataset.batch(batch_size=self.config.BATCH_SIZE).prefetch(buffer_size=autotune)
		self.data_samples = data_samples
		return dataset, data_samples
	def _img_parse(self, file_name):
		"""Pick one random frame from a video folder and derive its spoof type.

		Returns (image path, landmark path, dataset name, spoof-type string).
		"""
		# tf.numpy_function hands paths over as bytes — decode first.
		file_name = file_name.decode('UTF-8')
		meta = glob.glob(file_name + '/*.png')
		try:
			im_name = meta[random.randint(0, len(meta) - 1)]
		except:
			# Empty folder: dump diagnostics and abort the run.
			print(file_name)
			print(meta)
			import sys;sys.exit(0)
		# Landmark file shares the frame's basename: xxx.png -> xxx.npy
		lm_name = im_name[:-3] + 'npy'
		parts = file_name.split('/')
		dataset = self.config.dataset
		if dataset == 'SiWM-v2':
			# Folder name encodes the spoof type, e.g. 'Makeup_Co_<subject>'.
			stype = parts[-1].split('_')[:-1]
			stype = '_'.join(stype)
		elif dataset == 'SiW':
			# SiW folder name: ...-...-<spoof_id>-... ; 1=Live, 2=Paper, else Replay.
			spoof_id = int(parts[-1].split("-")[2])
			if spoof_id == 1:
				stype = 'Live'
			elif spoof_id == 2:
				stype = 'Paper'
			else:
				stype = 'Replay'
		elif dataset == 'oulu':
			# device_id, bg_id, sub_id, spoof_id
			spoof_id = int(parts[-1].split('_')[-1])
			if spoof_id == 1:
				stype = "Live"
			elif spoof_id in [2,3]:
				stype = 'Paper'
			elif spoof_id in [4,5]:
				stype = 'Replay'
		else:
			assert False, print("Please offer the valid dataset...")
		return im_name, lm_name, dataset, stype
	def _img_preprocess(self, file_name, dataset=None):
		"""Load, crop, and resize one frame; retry until labels can be built.

		NOTE(review): the `dataset` parameter is overwritten from config on
		every iteration, so the passed-in value is effectively unused.
		"""
		while True:
			im_name, lm_name, dataset_, stype = self._img_parse(file_name)
			dataset = self.config.dataset
			img = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB) / 255.
			lm = np.load(lm_name)
			img, lm = face_crop_and_resize(img, lm, self.config.IMG_SIZE, aug=True)
			try:
				dmap, n_stype = get_dmap_and_stype(self.config, lm, dataset, stype)
				n_stype = np.reshape(np.array([n_stype], np.float32), (-1))
			except:
				# Label construction failed — resample another frame and retry.
				print(f"{file_name} cannot work on get_dmap_and_stype.")
				continue
			else:
				break
		return img, dmap, n_stype, lm, dataset
	def _parse_function(self, _file1, _file2):
		"""numpy-side parser for a (live folder, spoof folder) pair."""
		img1, dmap1, n_stype1, lm1, dataset = self._img_preprocess(_file1)
		img2, dmap2, n_stype2, lm2, _ = self._img_preprocess(_file2, dataset)
		reg = img1 # dummy code.
		return img1.astype(np.float32), img2.astype(np.float32), \
			   dmap1.astype(np.float32), dmap2.astype(np.float32), \
			   n_stype1.astype(np.float32), n_stype2.astype(np.float32), \
			   reg.astype(np.float32)
	def parse_fn_val(self, file1, file2):
		"""Graph-side wrapper: run the numpy parser and pin static shapes."""
		config = self.config
		_img1, _img2, _dmap1, _dmap2, _stype1, _stype2, _reg = \
			tf.numpy_function(self._parse_function,
							  [file1, file2],
							  [tf.float32, tf.float32, tf.float32,
							   tf.float32, tf.float32, tf.float32,
							   tf.float32])
		# ensure_shape restores shape info lost by numpy_function.
		_img1 = tf.ensure_shape(_img1, [config.IMG_SIZE, config.IMG_SIZE, 3])
		_img2 = tf.ensure_shape(_img2, [config.IMG_SIZE, config.IMG_SIZE, 3])
		_dmap1 = tf.ensure_shape(_dmap1,[config.IMG_SIZE, config.IMG_SIZE, 2])
		_dmap2 = tf.ensure_shape(_dmap2,[config.IMG_SIZE, config.IMG_SIZE, 2])
		_stype1 = tf.ensure_shape(_stype1, [15])
		_stype2 = tf.ensure_shape(_stype2, [15])
		_reg = tf.ensure_shape(_reg,[config.IMG_SIZE, config.IMG_SIZE, 3])
		return _img1, _img2, _dmap1, _dmap2, _stype1, _stype2, _reg
	def parse_fn(self, file1, file2):
		"""Training parser: val parsing plus random contrast/brightness jitter."""
		config = self.config
		_img1, _img2, _dmap1, _dmap2, _stype1, _stype2, _reg = self.parse_fn_val(file1, file2)
		# Data augmentation.
		# Jitter is applied with probability 0.5 to each image independently.
		_img1a = tf.image.random_contrast(_img1, 0.9, 1.1)+ tf.random.uniform([1, 1, 3], minval=-0.03, maxval=0.03)
		_img1a = tf.cond(tf.greater(tf.random.uniform([1], 0, 1)[0], 0.5),lambda: _img1a, lambda: _img1)
		_img2a = tf.image.random_contrast(_img2, 0.9, 1.1)+ tf.random.uniform([1, 1, 3], minval=-0.03, maxval=0.03)
		_img2a = tf.cond(tf.greater(tf.random.uniform([1],0,1)[0],0.5),lambda: _img2a, lambda: _img2)
		return _img1a, _img2a, _dmap1, _dmap2, _stype1, _stype2, _reg
	def parse_fn_test(self, file):
		"""Test parser: load a single frame (no augmentation) plus its path."""
		config = self.config
		def _parse_function(_file):
			_file = _file.decode('UTF-8')
			im_name = _file
			lm_name = im_name[:-3] + 'npy'
			dataset = config.dataset
			img = cv2.cvtColor(cv2.imread(im_name), cv2.COLOR_BGR2RGB) / 255.
			lm = np.load(lm_name)
			img, lm = face_crop_and_resize(img, lm, config.IMG_SIZE, aug=False)
			return img.astype(np.float32), im_name
		image, im_name = tf.numpy_function(_parse_function, [file], [tf.float32, tf.string])
		image = tf.ensure_shape(image, [config.IMG_SIZE, config.IMG_SIZE, 3])
		return image, im_name
| 12,987 | 46.229091 | 122 | py |
null | Multi-domain-learning-FAS-main/source_multi_domain/train_architecture.py | # -*- coding: utf-8 -*-
# Copyright 2022
#
# Authors: Xiao Guo, Yaojie Liu, Anil Jain, and Xiaoming Liu.
#
# All Rights Reserved.s
#
# This research is based upon work supported by the Office of the Director of
# National Intelligence (ODNI), Intelligence Advanced Research Projects Activity
# (IARPA), via IARPA R&D Contract No. 2017-17020200004. The views and
# conclusions contained herein are those of the authors and should not be
# interpreted as necessarily representing the official policies or endorsements,
# either expressed or implied, of the ODNI, IARPA, or the U.S. Government. The
# U.S. Government is authorized to reproduce and distribute reprints for
# Governmental purposes not withstanding any copyright annotation thereon.
# ==============================================================================
import tensorflow as tf
import argparse
import os
import time
import math
import numpy as np
from model import Generator, Discriminator, region_estimator
from utils import l1_loss, l2_loss, Logging
from dataset import Dataset
from config import Config_siwm, Config_siw, Config_oulu
from tensorboardX import SummaryWriter
class SRENet(object):
	"""
	the SRENet class.
	Attributes:
	-----------
		configurations: config, config_siw, and config_oulu.
		modules: gen_pretrained, gen, RE, multi-disc and optimizers.
		various directories for checkpoints.
		log: log handler.
	Methods:
	-----------
		basic functions: update_lr, _restore, _save.
		optimization functions: train and train_step.
	"""
	def __init__(self, config, config_siw, config_oulu):
		self.config = config
		self.config_siw = config_siw
		self.config_oulu = config_oulu
		self.lr = config.lr
		# Combined batch size across the three datasets.
		self.bs = config.BATCH_SIZE + config_siw.BATCH_SIZE + config_oulu.BATCH_SIZE
		self.SUMMARY_WRITER = config.SUMMARY_WRITER
		## The modules:
		self.gen_pretrained = Generator()
		self.RE = region_estimator()
		# The generator embeds the region estimator as a submodule.
		self.gen = Generator(self.RE)
		# Three discriminators at downsizing factors 1, 2, 4 (multi-scale).
		self.disc1 = Discriminator(1,config.n_layer_D)
		self.disc2 = Discriminator(2,config.n_layer_D)
		self.disc3 = Discriminator(4,config.n_layer_D)
		self.gen_opt = tf.keras.optimizers.Adam(self.lr)
		# Checkpoint initialization.
		self.save_dir = config.save_model_dir
		self.checkpoint_path_g = self.save_dir+"/gen/cp-{epoch:04d}.ckpt"
		self.checkpoint_path_re = self.save_dir+"/ReE/cp-{epoch:04d}.ckpt"
		self.checkpoint_path_d1 = self.save_dir+"/dis1/cp-{epoch:04d}.ckpt"
		self.checkpoint_path_d2 = self.save_dir+"/dis2/cp-{epoch:04d}.ckpt"
		self.checkpoint_path_d3 = self.save_dir+"/dis3/cp-{epoch:04d}.ckpt"
		self.checkpoint_path_g_op = self.save_dir+"/g_opt/cp-{epoch:04d}.ckpt"
		self.checkpoint_dir_g = os.path.dirname(self.checkpoint_path_g)
		self.checkpoint_dir_re = os.path.dirname(self.checkpoint_path_re)
		self.checkpoint_dir_d1 = os.path.dirname(self.checkpoint_path_d1)
		self.checkpoint_dir_d2 = os.path.dirname(self.checkpoint_path_d2)
		self.checkpoint_dir_d3 = os.path.dirname(self.checkpoint_path_d3)
		self.checkpoint_dir_g_op = os.path.dirname(self.checkpoint_path_g_op)
		# NOTE(review): model_list starts [gen, RE, ...] while the path lists
		# start [ReE, gen, ...] — gen is saved under /ReE and RE under /gen.
		# save and restore use the same pairing so it is self-consistent,
		# but the on-disk folder names are swapped; confirm before renaming.
		self.model_list = [self.gen, self.RE, self.disc1, self.disc2, self.disc3]
		self.model_p_list= [self.checkpoint_path_re,
							self.checkpoint_path_g,
							self.checkpoint_path_d1,
							self.checkpoint_path_d2,
							self.checkpoint_path_d3]
		self.model_d_list= [self.checkpoint_dir_re,
							self.checkpoint_dir_g,
							self.checkpoint_dir_d1,
							self.checkpoint_dir_d2,
							self.checkpoint_dir_d3]
		# Log class for displaying the losses.
		self.log = Logging(config)
	def update_lr(self, new_lr=0, restore=False, last_epoch=0):
		"""Push the current self.lr into the optimizer.

		When `restore` is True, first re-apply `last_epoch` worth of decay.
		NOTE(review): `new_lr` is never read — callers must set self.lr
		before calling; confirm before relying on the parameter.
		"""
		if restore:
			assert last_epoch != 0, print("Restoring LR should not start at 0 epoch.")
			self.lr = self.lr * np.power(self.config.LEARNING_RATE_DECAY_FACTOR, last_epoch)
			print(f"Restoring the previous learning rate {self.lr} at epoch {last_epoch}.")
		self.gen_opt.learning_rate.assign(self.lr)
	def _restore(self, model, checkpoint_dir, pretrain=False):
		"""Load weights; return the epoch parsed from the latest checkpoint,
		or load the fixed pretrain checkpoint (cp-0179) when `pretrain`."""
		if not pretrain:
			last_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)
			model.load_weights(last_checkpoint)
			# Checkpoint name pattern is cp-XXXX.ckpt — extract XXXX.
			last_epoch = int((last_checkpoint.split('.')[1]).split('-')[-1])
			return last_epoch
		else:
			model.load_weights(checkpoint_dir+'/cp-0179.ckpt')
	def _save(self, model, checkpoint_path, epoch):
		"""Save `model` weights under the epoch-formatted checkpoint path."""
		model.save_weights(checkpoint_path.format(epoch=epoch))
	#############################################################################
	def train(self, dataset, dataset_siw, dataset_oulu, config):
		"""Main training loop over the three datasets.

		Resumes from the latest checkpoint when one exists; otherwise loads
		the pretrained generator/discriminators and starts from epoch 0.
		"""
		last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir_g)
		if last_checkpoint:
			for i, j in zip(self.model_list, self.model_d_list):
				last_epoch = self._restore(i, j)
			print('**********************************************************')
			print('Restore from Epoch '+str(last_epoch))
			self.update_lr(restore=True, last_epoch=last_epoch)
			print('**********************************************************')
		else:
			# To load the pretrained model.
			pretrain_model_dir = config.pretrain_folder
			pretrain_model_dir_list = [pretrain_model_dir+'gen',
									   pretrain_model_dir+'dis1',
									   pretrain_model_dir+'dis2',
									   pretrain_model_dir+'dis3']
			# last_checkpoint = tf.train.latest_checkpoint(pretrain_model_dir+'gen')
			for i, j in zip([self.gen, self.disc1, self.disc2, self.disc3], pretrain_model_dir_list):
				self._restore(i, j, True)
			print('**********************************************************')
			print('Loading pretrained model is done')
			print("finish loading the pretrain-model.")
			print('**********************************************************')
			last_epoch = 0
		for epoch in range(last_epoch, self.config.MAX_EPOCH):
			start = time.time()
			training = True
			for step in range(self.config.STEPS_PER_EPOCH):
				# One batch per dataset; concatenated inside train_step.
				img_batch_siwm = dataset.nextit()
				img_batch_siw  = dataset_siw.nextit()
				img_batch_oulu = dataset_oulu.nextit()
				losses, figs = self.train_step(img_batch_siwm, img_batch_siw, img_batch_oulu,
											   training, tf.constant(step))
				self.log.display(losses, epoch, step, training, self.config.STEPS_PER_EPOCH)
				self.log.save(figs, training)
				iter_num = self.config.STEPS_PER_EPOCH*epoch+step
				for name_, loss_ in losses.items():
					self.SUMMARY_WRITER.add_scalar(f'train/{name_}', loss_.numpy(), iter_num)
			# Checkpoint every epoch (see __init__ NOTE on path pairing).
			for i, j in zip(self.model_list, self.model_p_list):
				self._save(i, j, epoch)
			if epoch % config.DECAY_STEP == 0:
				self.SUMMARY_WRITER.add_scalar(f'train/gen_lr', self.gen_opt.learning_rate.numpy(), epoch)
				self.lr = self.lr * config.LEARNING_RATE_DECAY_FACTOR
				self.update_lr(self.lr)
		self.SUMMARY_WRITER.flush()
		self.SUMMARY_WRITER.close()
	@tf.function
	def train_step(self, data, data1, data2, training, step=0):
		"""One optimization step over the concatenated tri-dataset batch.

		Returns (losses dict, list of tensors for visualization). The batch
		layout along axis 0 is: live images from all three datasets, then
		spoof images from all three datasets (2*self.bs samples total).
		"""
		losses = {}
		figs = []
		bsize, imsize = self.bs, self.config.IMG_SIZE
		img_li0, img_sp0, dmap_li0, dmap_sp0, _, _, _ = data
		img_li1, img_sp1, dmap_li1, dmap_sp1, _, _, _ = data1
		img_li2, img_sp2, dmap_li2, dmap_sp2, _, _, _ = data2
		img_li  = tf.concat([img_li0, img_li1, img_li2], axis=0)
		img_sp  = tf.concat([img_sp0, img_sp1, img_sp2], axis=0)
		dmap_li = tf.concat([dmap_li0, dmap_li1, dmap_li2], axis=0)
		dmap_sp = tf.concat([dmap_sp0, dmap_sp1, dmap_sp2], axis=0)
		img  = tf.concat([img_li, img_sp], axis=0)
		dmap = tf.concat([dmap_li, dmap_sp], axis=0)
		dmap_size_32 = tf.image.resize(dmap, [32, 32])
		###########################################################
		with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape, tf.GradientTape() as reg_tape:
			dmap_pred, p, c, n, x, region_map = self.gen(img, training=training)
			region_map = tf.reshape(region_map, [2*bsize,256,256,1])
			# Live reconstruction.
			# p: spoof region mask, c: replacement content, n: additive trace.
			recon = (1 - p) * (img - n) + p * c
			trace = img - recon
			# Discriminators see real lives + reconstructed (de-spoofed) spoofs.
			d_img = tf.concat([img[:bsize, ...], recon[bsize:, ...]], axis=0)
			d_output_1 = self.disc1(d_img, training=training)
			d_output_2 = self.disc2(d_img, training=training)
			d_output_3 = self.disc3(d_img, training=training)
			# Semantic mask loss.
			p_prior_knowledge = l1_loss(p[..., 0], dmap[..., 1])
			# Pseudo ground-truth change mask: thresholded per-pixel trace magnitude.
			# NOTE(review): the hard-coded slice bounds (4, 6) tie this to the
			# configured per-dataset batch sizes — confirm against the configs.
			real_change = tf.zeros([4,256,256])
			siwm_change = tf.cast(tf.greater(tf.reduce_sum(tf.abs(trace[4:6,:,:]), axis=3), 0.3),tf.float32)
			siw_oulu_change = tf.cast(tf.greater(tf.reduce_sum(tf.abs(trace[6:,:,:]), axis=3), 0.1),tf.float32)
			p_significant_change = tf.stop_gradient(tf.concat([real_change, siwm_change, siw_oulu_change], axis=0))
			map_loss = l1_loss(tf.squeeze(region_map), p_significant_change)
			p_post_constraint = tf.reduce_mean(tf.abs(tf.squeeze(p[bsize:, ...])
													- p_significant_change[bsize:, ...]))
			p_loss = p_prior_knowledge * 0.1 + p_post_constraint
			# Trace constraint loss.
			# Live traces should vanish; spoof traces are only weakly penalized.
			trace_loss = tf.reduce_mean(tf.abs(trace[:bsize, ...])) + \
						 tf.reduce_mean(tf.abs(trace[bsize:, ...])) * 1e-5
			# Depth map loss.
			dmap_loss = l1_loss(dmap_pred, dmap_size_32) * 100
			# GAN loss for the generator.
			gan_loss = l2_loss(d_output_1[1], 1) + l2_loss(d_output_2[1], 1) + \
					   l2_loss(d_output_3[1], 1)
			# Overall loss for generator.
			g_total_loss = dmap_loss + gan_loss + p_loss + trace_loss * 10
			# Discriminators loss.
			d_loss_r = l2_loss(d_output_1[0], 1) + l2_loss(d_output_2[0], 1) + \
					   l2_loss(d_output_3[0], 1)
			d_loss_s = l2_loss(d_output_1[1], 0) + l2_loss(d_output_2[1], 0) + \
					   l2_loss(d_output_3[1], 0)
			d_total_loss = (d_loss_r + d_loss_s) / 4
		if training:
			# Gather all the trainable variables
			gen_trainable_vars = self.gen.trainable_variables
			reg_trainable_vars = self.RE.trainable_variables
			disc_trainable_vars = self.disc1.trainable_variables + \
								  self.disc2.trainable_variables + \
								  self.disc3.trainable_variables
			# Generate gradients.
			r_gradients = reg_tape.gradient(map_loss, reg_trainable_vars)
			g_gradients = gen_tape.gradient(g_total_loss, gen_trainable_vars)
			d_gradients = disc_tape.gradient(d_total_loss, disc_trainable_vars)
			# Backpropogate gradients.
			self.gen_opt.apply_gradients(zip(g_gradients, gen_trainable_vars))
			self.gen_opt.apply_gradients(zip(r_gradients, reg_trainable_vars))
			# Discriminators are updated every other step only.
			if step % 2 == 0:
				self.gen_opt.apply_gradients(zip(d_gradients, disc_trainable_vars))
		# Gather losses for displaying for tracking the training.
		losses['dmap'] = dmap_loss
		losses['gen'] = gan_loss
		losses['map_loss'] = map_loss
		losses['p_post_constraint'] = p_post_constraint
		losses['disc_real'] = d_loss_r
		losses['disc_fake'] = d_loss_s
		losses['p_prior_knowledge'] = p_prior_knowledge * 0.1
		losses['trace_loss'] = trace_loss
		# Gather network output and intermediate results for visualization.
		dmap = tf.concat([dmap, tf.zeros([bsize*2, 256, 256, 1])], axis=3)
		dmap_pred = tf.concat([dmap_pred, tf.zeros([bsize*2, 32, 32, 1])], axis=3)
		p_significant_change = tf.expand_dims(p_significant_change, axis=-1)
		figs = [img, recon, tf.abs(p), tf.abs(p_significant_change),
				tf.abs(region_map), tf.abs(c), tf.abs(n), dmap, dmap_pred]
		return losses, figs
def main(args):
	"""Assemble per-dataset configs, create experiment folders, and launch training."""
	# One configuration object per dataset.
	config = Config_siwm(args)
	config_siw = Config_siw(args)
	config_oulu = Config_oulu(args)
	config.lr = args.lr
	config.type = args.type
	config.pretrain_folder = args.pretrain_folder
	# A single descriptor string tags every output folder of this run.
	config.desc_str = (f'_data_{args.data}'
					   f'_stage_{config.phase}'
					   f'_type_{config.type}'
					   f'_decay_{config.DECAY_STEP}'
					   f'_epoch_{args.epoch}'
					   f'_lr_{config.lr}')
	config.root_dir = './log' + config.desc_str
	config.exp_dir = '/exp' + config.desc_str
	config.CHECKPOINT_DIR = config.root_dir + config.exp_dir
	config.tb_dir = './tb_logs' + config.desc_str
	config.save_model_dir = "./save_model" + config.desc_str
	config.SUMMARY_WRITER = SummaryWriter(config.tb_dir)
	for folder in [config.root_dir, config.save_model_dir,
				   config.CHECKPOINT_DIR, config.CHECKPOINT_DIR + '/test']:
		os.makedirs(folder, exist_ok=True)
	banner = '**********************************************************'
	print(banner)
	print(f"Making root folder: {config.root_dir}")
	print(f"Current exp saved into folder: {config.CHECKPOINT_DIR}")
	print(f"The tensorboard results are saved into: {config.tb_dir}")
	print(f"The trained weights saved into folder: {config.save_model_dir}")
	print(banner)
	config.compile(dataset_name='SiWM-v2')
	config_siw.compile(dataset_name='SiW')
	config_oulu.compile(dataset_name='Oulu')
	print(banner)
	srenet = SRENet(config, config_siw, config_oulu)
	dataset_train_siwm = Dataset(config, 'train')
	dataset_train_siw  = Dataset(config_siw, 'train')
	dataset_train_oulu = Dataset(config_oulu, 'train')
	srenet.train(dataset_train_siwm, dataset_train_siw, dataset_train_oulu, config)
if __name__ == "__main__":
	# Command-line interface; each flag's help string documents its purpose.
	parser = argparse.ArgumentParser()
	parser.add_argument('--cuda', type=int, default=6, help='The gpu num to use.')
	parser.add_argument('--stage', type=str, default='ft', choices=['ft','pretrain','ub'])
	parser.add_argument('--type', type=str, default='spoof', choices=['spoof','age','race','illu'])
	parser.add_argument('--set', type=str, default='all', help='To choose from the predefined 14 types.')
	parser.add_argument('--epoch', type=int, default=60, help='How many epochs to train the model.')
	parser.add_argument('--data', type=str, default='all', choices=['all','SiW','SiWM','oulu'])
	parser.add_argument('--lr', type=float, default=1e-7, help='The starting learning rate.')
	parser.add_argument('--decay_step', type=int, default=2, help='The learning rate decay step.')
	parser.add_argument('--pretrain_folder', type=str, default='./pre_trained/', help='Pretrain weight.')
	args = parser.parse_args()
	main(args)
| 13,810 | 43.551613 | 106 | py |