Dataset schema (three string columns, recovered from the viewer header):
  python_code  — string, lengths 0 to 992k
  repo_name    — string, lengths 8 to 46
  file_path    — string, lengths 5 to 162
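Each entry below is one row: the (truncated) python_code cell, then its repo_name, then its file_path. A minimal sketch of how rows with this schema could be iterated, assuming they are stored as JSON Lines; the file name "rows.jsonl" is hypothetical:

import json
from collections import Counter

def iter_rows(path="rows.jsonl"):
    # Each line is one row: {"python_code": ..., "repo_name": ..., "file_path": ...}
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

# Example use: count rows per downstream task folder inside InternVideo-main.
counts = Counter(
    row["file_path"].split("/")[1]
    for row in iter_rows()
    if row["file_path"].startswith("Downstream/"))
print(counts.most_common())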
# model settings evidence_loss = dict(type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log', with_kldiv=False, with_avuloss=False, annealing_method='exp') model = dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py
# model settings evidence_loss = dict(type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log', with_avuloss=True, annealing_method='exp') model = dict( type='Recognizer3D', backbone=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py
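The two EDL entries above only show the head of each config. As a hedged illustration (not the truncated originals), an mmaction2-style config usually wires the evidence_loss dict into the classification head via loss_cls; the head type, channel width, and omission of the TPN neck below are assumptions:

# Illustrative sketch only; structure assumed, evidence_loss values copied from the first row.
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=False,
    annealing_method='exp')

model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50'),
    cls_head=dict(
        type='TPNHead',            # head type assumed for a TPN config
        num_classes=101,
        in_channels=2048,          # assumed SlowOnly feature width
        loss_cls=evidence_loss))   # the evidential loss replaces the default CrossEntropyLoss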
# model settings model = dict( type='Recognizer3DRPL', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inf...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py
# model settings model = dict( type='Recognizer3DBNN', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inf...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py
# model settings model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflat...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_dnn.py
# model settings model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflat...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_enn.py
# model settings model = dict( type='Recognizer3D', backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2), cls_head=dict( type='X3DHead', in_channels=432, num_classes=400, spatial_type='avg', dropout_ratio=0.5, fc1_bias=False)) # model training and t...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py
# model settings model = dict( type='Recognizer3D', backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2), cls_head=dict( type='X3DHead', in_channels=432, num_classes=400, spatial_type='avg', dropout_ratio=0.5, fc1_bias=False)) # model training and t...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py
model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=51, in_channels=2048, spatial_type='avg', consensus=dict(type='...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='modelzoo/tsn_r50_320p_1x1x8_110e_kinetics400_flow.pth', depth=50, in_channels=10, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=200, in...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=True), cls_head=dict( type='TSNHead', num_classes=174, ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=174, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py
model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=51, in_channels=2048, spatial_type='avg', consensus=dict(type='...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='modelzoo/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.pth', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=200, in_channels=2048, s...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='modelzoo/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.pth', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=200, in_channels=2048, s...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py
model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=51, in_channels=2048, spatial_type='avg', consensus=dict(type='...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=339, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='modelzoo/tsn_r50_320p_1x1x8_110e_kinetics400_flow.pth', depth=50, in_channels=10, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=200, in...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=True), cls_head=dict( type='TSNHead', num_classes=174, ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=174, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=101, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=700, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=600, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet101', depth=101, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=313, in_channels=2048, spatial_type='avg', c...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py
# model settings category_nums = dict( action=739, attribute=117, concept=291, event=69, object=1678, scene=248) target_cate = 'object' model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False), cls_h...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py
# model settings category_nums = dict( action=739, attribute=117, concept=291, event=69, object=1678, scene=248) target_cate = 'action' model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False), cls_h...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py
# model settings category_nums = dict( action=739, attribute=117, concept=291, event=69, object=1678, scene=248) target_cate = 'scene' model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False), cls_he...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py
# model settings category_nums = dict( action=739, attribute=117, concept=291, event=69, object=1678, scene=248) target_cate = 'attribute' model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False), cl...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py
# model settings category_nums = dict( action=739, attribute=117, concept=291, event=69, object=1678, scene=248) target_cate = 'event' model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False), cls_he...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py
# model settings category_nums = dict( action=739, attribute=117, concept=291, event=69, object=1678, scene=248) target_cate = 'object' model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False), cls_h...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py
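The six HVU entries above share one pattern and differ only in target_cate. A small sketch of that pattern, with category_nums copied from the rows and the head width assumed for a ResNet-18 backbone:

category_nums = dict(
    action=739, attribute=117, concept=291,
    event=69, object=1678, scene=248)
target_cate = 'action'  # 'attribute', 'concept', 'event', 'object', or 'scene' in the other configs

cls_head = dict(
    type='TSNHead',
    num_classes=category_nums[target_cate],  # e.g. 739 classes for the action split
    in_channels=512)                          # assumed: ResNet-18 final feature width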
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_3crop_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_10crop_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_10crop_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_3crop_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_10crop_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_3crop_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py
# model settings model = dict( type='Recognizer2D', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False), cls_head=dict( type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg', con...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, in_channels=2, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=101, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, in_channels=2, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), with_pool2=False, ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50', lateral=False, in_channels=2, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py
model = dict( type='Recognizer3D', backbone=dict( type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False, conv1_kernel=(1, 7, 7), conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False), cls_head=dict( ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py
# model settings model = dict( type='AudioRecognizer', backbone=dict( type='ResNetAudio', depth=50, pretrained=None, in_channels=1, norm_eval=False), cls_head=dict( type='AudioTSNHead', num_classes=400, in_channels=1024, dropout_ratio=0...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition_audio/audioonly/audioonly_r50_64x1x1_100e_kinetics400_audio_feature.py
# model settings model = dict( type='AudioRecognizer', backbone=dict(type='ResNet', depth=50, in_channels=1, norm_eval=False), cls_head=dict( type='AudioTSNHead', num_classes=400, in_channels=2048, dropout_ratio=0.5, init_std=0.01)) # model training and testing settin...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r50_64x1x1_100e_kinetics400_audio.py
# model settings model = dict( type='AudioRecognizer', backbone=dict(type='ResNet', depth=18, in_channels=1, norm_eval=False), cls_head=dict( type='AudioTSNHead', num_classes=400, in_channels=512, dropout_ratio=0.5, init_std=0.01)) # model training and testing setting...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py
# model settings model = dict( type='BMN', temporal_dim=100, boundary_ratio=0.5, num_samples=32, num_samples_per_bin=3, feat_dim=400, soft_nms_alpha=0.4, soft_nms_low_threshold=0.5, soft_nms_high_threshold=0.9, post_process_top_k=100) # model training and testing settings train_c...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
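The BMN entry above is cut off after the model dict. Reflowed as a hedged sketch (model fields copied from the row; the trailing train/test settings are an assumption based on the usual mmaction2 layout):

model = dict(
    type='BMN',
    temporal_dim=100,
    boundary_ratio=0.5,
    num_samples=32,
    num_samples_per_bin=3,
    feat_dim=400,
    soft_nms_alpha=0.4,
    soft_nms_low_threshold=0.5,
    soft_nms_high_threshold=0.9,
    post_process_top_k=100)
# Assumed continuation of the truncated "train_c..." tail:
train_cfg = None
test_cfg = dict(average_clips='score')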
# model training and testing settings train_cfg = dict( ssn=dict( assigner=dict( positive_iou_threshold=0.7, background_iou_threshold=0.01, incomplete_iou_threshold=0.3, background_coverage_threshold=0.02, incomplete_overlap_threshold=0.01), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py
# model training and testing settings train_cfg = dict( ssn=dict( assigner=dict( positive_iou_threshold=0.7, background_iou_threshold=0.01, incomplete_iou_threshold=0.3, background_coverage_threshold=0.02, incomplete_overlap_threshold=0.01), ...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
# model settings model = dict( type='TEM', temporal_dim=100, boundary_ratio=0.1, tem_feat_dim=400, tem_hidden_dim=512, tem_match_threshold=0.5) # model training and testing settings train_cfg = None test_cfg = dict(average_clips='score') # dataset settings dataset_type = 'ActivityNetDataset' dat...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
# dataset settings dataset_type = 'ActivityNetDataset' data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' ann_file_train = 'data/ActivityNet/anet_anno_train.json' ann_file_val = 'data/ActivityNet/anet_anno_val.json' ann_file_te...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
# model settings model = dict( type='PEM', pem_feat_dim=32, pem_hidden_dim=256, pem_u_ratio_m=1, pem_u_ratio_l=2, pem_high_temporal_iou_threshold=0.6, pem_low_temporal_iou_threshold=2.2, soft_nms_alpha=0.75, soft_nms_low_threshold=0.65, soft_nms_high_threshold=0.9, post_proce...
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
from itertools import count import os import numpy as np import math import sys import time import datetime import logging from typing import Iterable, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import Mixup from timm.utils import accuracy, ModelEma import utils from al...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/engine_for_finetuning.py
import numpy as np class TubeMaskingGenerator: def __init__(self, input_size, mask_ratio): self.frames, self.height, self.width = input_size self.num_patches_per_frame = self.height * self.width self.total_patches = self.frames * self.num_patches_per_frame self.num_masks_per_frame ...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/masking_generator.py
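The masking_generator.py excerpt above stops mid-constructor. A hedged sketch of how a VideoMAE-style tube masking generator is typically completed: one spatial mask is sampled and repeated over every frame, so masked patches form tubes along the time axis.

import numpy as np

class TubeMaskingGenerator:
    def __init__(self, input_size, mask_ratio):
        self.frames, self.height, self.width = input_size
        self.num_patches_per_frame = self.height * self.width
        self.total_patches = self.frames * self.num_patches_per_frame
        self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)
        self.total_masks = self.frames * self.num_masks_per_frame

    def __call__(self):
        # Draw one spatial mask with the requested ratio of masked patches ...
        mask_per_frame = np.hstack([
            np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),
            np.ones(self.num_masks_per_frame),
        ])
        np.random.shuffle(mask_per_frame)
        # ... and tile it over all frames so the masked positions form temporal tubes.
        return np.tile(mask_per_frame, (self.frames, 1)).flatten()

# Example: mask 90% of patches in an 8x14x14 token grid.
mask = TubeMaskingGenerator((8, 14, 14), 0.9)()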
import torch import torchvision.transforms.functional as F import warnings import random import numpy as np import torchvision from PIL import Image, ImageOps import numbers class GroupRandomCrop(object): def __init__(self, size): if isinstance(size, numbers.Number): self.size = (int(size), in...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/transforms.py
import math import sys from typing import Iterable import torch import torch.nn as nn import utils from einops import rearrange from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, ...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/engine_for_pretraining.py
import math import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, get_sinusoid_encoding_table from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ def tru...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/modeling_pretrain.py
# -*- coding: utf-8 -*- import argparse import numpy as np import torch import torch.backends.cudnn as cudnn from PIL import Image from pathlib import Path from timm.models import create_model from datasets import DataAugmentationForVideoMAE from torchvision.transforms import ToPILImage from einops import rearrange fro...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/run_videomae_vis.py
import os from torchvision import transforms from transforms import * from masking_generator import TubeMaskingGenerator from kinetics import VideoClsDataset, VideoMAE from data.ava import AVAVideoDataset,KineticsDataset,AKDataset from data.transforms import TransformsCfg import alphaction.config.paths_catalog as path...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/datasets.py
""" This implementation is based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py pulished under an Apache License 2.0. COMMENT FROM ORIGINAL: AutoAugment, RandAugment, and AugMix for PyTorch This code implements the searched ImageNet policies with various tweaks and improveme...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/rand_augment.py
import numpy as np from PIL import Image import torch def convert_img(img): """Converts (H, W, C) numpy.ndarray to (C, W, H) format """ if len(img.shape) == 3: img = img.transpose(2, 0, 1) if len(img.shape) == 2: img = np.expand_dims(img, 0) return img class ClipToTensor(object):...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/volume_transforms.py
import argparse import datetime from operator import is_ import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from functools import partial from pathlib import Path from collections import OrderedDict from timm.data.mixup import Mixup from timm.models import create_mo...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/run_class_finetuning.py
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.models import create_model from optim_factory import create_optimizer from datasets import build_pretraining_dataset from engine_for_pretraining impor...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/run_mae_pretraining.py
import numbers import cv2 import numpy as np import PIL import torch def _is_tensor_clip(clip): return torch.is_tensor(clip) and clip.ndimension() == 4 def crop_clip(clip, min_h, min_w, h, w): if isinstance(clip[0], np.ndarray): cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip] ...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/functional.py
import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from torch.utils.data._utils.collate import default_collate from pathlib import Path import subprocess import torch import torch.distributed as dist...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/utils.py
""" This implementation is based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py pulished under an Apache License 2.0. """ import math import random import torch def _get_pixels( per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda" ): # NOTE I've s...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/random_erasing.py
#!/usr/bin/env python3 import math import numpy as np import random import torch import torchvision.transforms.functional as F from PIL import Image from torchvision import transforms from rand_augment import rand_augment_transform from random_erasing import RandomErasing import numbers import PIL import torchvision...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/video_transforms.py
import os import numpy as np from numpy.lib.function_base import disp import torch import decord from PIL import Image from torchvision import transforms from random_erasing import RandomErasing import warnings from decord import VideoReader, cpu from torch.utils.data import Dataset import video_transforms as video_tra...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/kinetics.py
from functools import partial import numpy as np from typing import Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model from dataclasses import dataclass from alphaction.modeling.poole...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/modeling_finetune.py
import torch from torch import optim as optim from timm.optim.adafactor import Adafactor from timm.optim.adahessian import Adahessian from timm.optim.adamp import AdamP from timm.optim.lookahead import Lookahead from timm.optim.nadam import Nadam from timm.optim.novograd import NovoGrad from timm.optim.nvnovograd impo...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/optim_factory.py
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/alphaction/__init__.py
import torch from torch import nn from torch.autograd import Function from torch.autograd.function import once_differentiable # import alphaction._custom_cuda_ext as _C class _SoftmaxFocalLoss(Function): @staticmethod def forward(ctx, logits, targets, gamma, alpha): ctx.gamma = gamma ctx.alph...
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/softmax_focal_loss.py