| python_code (string, 0–992k chars) | repo_name (string, 8–46 chars) | file_path (string, 5–162 chars) |
|---|---|---|
from glob import glob
from .base_dataset import BaseDataset
import random
import os
import pandas as pd
import io
from PIL import Image
from CoTrain.datasets import client
class CC3MDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.s... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datasets/image/cc3m.py |
# pretrain dataset
## video
from CoTrain.datamodules.video.webvid_datamodule import WEBVIDDataModule
from CoTrain.datamodules.video.webvid10m_datamodule import WEBVID10MDataModule
from CoTrain.datamodules.video.howto100m_datamodule import HT100MDataModule
from CoTrain.datamodules.video.youtube_datamodule import YOUTUBE... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/__init__.py |
from CoTrain.datasets import MSRVTTChoiceDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class MSRVTTChoiceDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSRVTTChoiceDa... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_choice_datamodule.py |
from CoTrain.datasets import TGIFQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class TGIFQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return TGIFQADataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgifqa_datamodule.py |
from CoTrain.datasets import WEBVID10MDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class WEBVID10MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return WEBVID10MDataset
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid10m_datamodule.py |
import functools
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from pytorch_lightning.trainer.supporters import CombinedLoader
from CoTrain.datamodules import _dat... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/multitask_datamodule.py |
from CoTrain.datasets import MSRVTTDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class MSRVTTDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSRVTTDataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvtt_datamodule.py |
from CoTrain.datasets import LSMDCChoiceDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class LSMDCChoiceDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return LSMDCChoiceDatas... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_choice_datamodule.py |
from CoTrain.datasets import HMDB51Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class HMDB51DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return HMDB51Dataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/hmdb51_datamodule.py |
from CoTrain.datasets import Ego4DDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class Ego4DDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return Ego4DDataset
@property
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_datamodule.py |
from CoTrain.datasets import TGIFDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class TGIFDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return TGIFDataset
@property
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tgif_datamodule.py |
from CoTrain.datasets import TVQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class TVQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return TVQADataset
@property
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/tvqa_datamodule.py |
from CoTrain.datasets import HT100MDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class HT100MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return HT100MDataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/howto100m_datamodule.py |
from CoTrain.datasets import MSRVTTQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
from collections import defaultdict
class MSRVTTQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msrvttqa_datamodule.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/__init__.py | |
from CoTrain.datasets import MSVDQADataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
from collections import defaultdict
class MSVDQADataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvdqa_datamodule.py |
from CoTrain.datasets import K400VideoDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class K400VideoDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return K400VideoDataset
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_video_datamodule.py |
from CoTrain.datasets import YOUTUBEDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class YOUTUBEDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return YOUTUBEDataset
@pro... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/youtube_datamodule.py |
from CoTrain.datasets import UCF101Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class UCF101DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return UCF101Dataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ucf101_datamodule.py |
from CoTrain.datasets import DIDEMODataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class DIDEMODataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return DIDEMODataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/didemo_datamodule.py |
from CoTrain.datasets import YTTemporalDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class YTTemporalMDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return YTTemporalDataset... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/yttemporal_datamodule.py |
from CoTrain.datasets import WEBVIDDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class WEBVIDDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return WEBVIDDataset
@proper... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/webvid_datamodule.py |
from CoTrain.datasets import K400Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class K400DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return K400Dataset
@property
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/k400_datamodule.py |
from CoTrain.datasets import EGO4DChoiceDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class EGO4DChoiceDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return EGO4DChoiceDatas... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/ego4d_choice_datamodule.py |
from CoTrain.datasets import LSMDCDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class LSMDCDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return LSMDCDataset
@property
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/lsmdc_datamodule.py |
from CoTrain.datasets import MSVDDataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
class MSVDDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MSVDDataset
@property
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/video/msvd_datamodule.py |
from CoTrain.datasets import NLVR2Dataset
from .datamodule_base import BaseDataModule
class NLVR2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return NLVR2Dataset
@property
def dataset_name(self... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/nlvr2_datamodule.py |
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
BertTokenizer,
)
def get_pretrained_tokenizer(from_pretrained):
if torch.distributed.is_initialized():
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/datamodule_base.py |
from CoTrain.datasets import ConceptualCaptionDataset
from .datamodule_base import BaseDataModule
class ConceptualCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return ConceptualCaptionDataset
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/conceptual_caption_datamodule.py |
from CoTrain.datasets import SBUCaptionDataset
from .datamodule_base import BaseDataModule
class SBUCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return SBUCaptionDataset
@property
def da... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/sbu_datamodule.py |
from CoTrain.datasets import MIX100MDataset
from .datamodule_base import BaseDataModule
class MIX100MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return MIX100MDataset
@property
def dataset_nam... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/mix100m_datamodule.py |
from CoTrain.datasets import CC3MDataset
from .datamodule_base import BaseDataModule
class CC3MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CC3MDataset
@property
def dataset_name(self):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc3m_datamodule.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/__init__.py | |
from CoTrain.datasets import VisualGenomeCaptionDataset
from .datamodule_base import BaseDataModule
class VisualGenomeCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VisualGenomeCaptionDatase... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vg_caption_datamodule.py |
from CoTrain.datasets import VQAv2Dataset
from CoTrain.datamodules.image.datamodule_base import BaseDataModule
from collections import defaultdict
class VQAv2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vqav2_datamodule.py |
from CoTrain.datasets import YFCC15MDataset
from .datamodule_base import BaseDataModule
class YFCC15MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return YFCC15MDataset
@property
def dataset_nam... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/yfcc15m_datamodule.py |
from CoTrain.datasets import LAION400MDataset
from .datamodule_base import BaseDataModule
class LAION400MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return LAION400MDataset
@property
def datas... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/laion400m_datamodule.py |
from CoTrain.datasets import CocoCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class CocoCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CocoCaptionKarpathyDatase... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/coco_caption_karpathy_datamodule.py |
from CoTrain.datasets import ActivityNetDataset
from .datamodule_base import BaseDataModule
class ActivityNetDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return ActivityNetDataset
@property
def... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/activitynet_datamodule.py |
from CoTrain.datasets import VCRDataset
from .datamodule_base import BaseDataModule
class VCRDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VCRDataset
@property
def dataset_name(self):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/vcr_datamodule.py |
from CoTrain.datasets import F30KCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class F30KCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return F30KCaptionKarpathyDatase... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/f30k_caption_karpathy_datamodule.py |
from CoTrain.datasets import CC12MDataset
from .datamodule_base import BaseDataModule
class CC12MDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CC12MDataset
@property
def dataset_name(self... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/datamodules/image/cc12m_datamodule.py |
from CoTrain.transforms.image.pixelbert import (
pixelbert_transform,
pixelbert_transform_randaug,
open_clip_transform,
)
_transforms = {
"pixelbert": pixelbert_transform,
"pixelbert_randaug": pixelbert_transform_randaug,
"open_clip": open_clip_transform,
}
def keys_to_transforms(keys: list, ... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/__init__.py |
# input: (C, T, H, W) output: (C, T, H, W)
def VideoTransform(mode='train', crop_size=224, backend='v100'):
if backend == 'a100':
print("initalize data augmentation for a100 gpus")
import CoTrain.transforms.video.video_transform as video_transform
from torchvision import transforms
#... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/video/videoaug.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/video/__init__.py | |
import numbers
import random
import numpy as np
import PIL
import skimage
import skimage.transform
import torchvision
import torch
from CoTrain.transforms.image import functional as F
from torchvision import transforms
from PIL import Image
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) fo... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/video/video_transform.py |
from .utils import (
inception_normalize,
MinMaxResize,
)
from torchvision import transforms
from .randaug import RandAugment
def pixelbert_transform(size=800, mode="train"):
longer = int((1333 / 800) * size)
return transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/pixelbert.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/__init__.py | |
# code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/randaug.py |
import numbers
import torch
import cv2
import numpy as np
import PIL
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/functional.py |
from torchvision import transforms
from PIL import Image
class MinMaxResize:
def __init__(self, shorter=800, longer=1333):
self.min = shorter
self.max = longer
def __call__(self, x):
w, h = x.size
scale = self.min / min(w, h)
if h < w:
newh, neww = self.min... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/utils.py |
def image_aug(images, image_transform):
# print(image_transform)
# I've no idea what the fuck this is doing
# TODO: Maybe remove the second view?
global_transform = image_transform[0]
# local_transform = image_transform[0][1]
global_images_tensor = []
# 2 GLOBAL views
for i in range(2):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/transforms/image/imageaug.py |
import torch
import torch.nn as nn
import random
class TemporalRoll(nn.Module):
def __init__(self, n_segment=3, n_div=8, v=0):
super(TemporalRoll, self).__init__()
self.n_segment = n_segment
self.fold_div = n_div
self.v = v
def forward(self, x, layer=1):
# return x
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/temporal_roll.py |
import torch
import torch.nn as nn
import pytorch_lightning as pl
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from CoTrain.modules import heads, cotrain_utils
from CoTrain.modules import objectives as objectives
from CoTrain.modules import base_vision_transformer as vit
from CoTrain.mo... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_module.py |
import torch
import io
import random
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from CoTrain.modules.dist_utils import all_gather
from CoTrain.modules.objectives import compute_irtr_recall, compute_decouple_irtr_recall, compute_ind_irtr_recall
# fro... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/cotrain_utils.py |
import torch
# def forzen_param(model):
# for name, param in model.named_parameters():
# if 'mlm_score' in name or 'vtm_score' in name or 'mpp_score' in name:
# param.requires_grad = True
# else:
# param.requires_grad = False
# return True
def forzen_param(model):
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/forzen_param.py |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implement of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/base_vision_transformer.py |
# Modified from https://github.com/lucidrains/CoCa-pytorch/blob/main/coca_pytorch/coca_pytorch.py
from turtle import forward
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, ... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/coca.py |
# from CoTrain.modules.cotrain_dino_module_v2 import CoTrainTransformerSS
from CoTrain.modules.cotrain_module import CoTrainTransformerSS
# from CoTrain.modules.cotrain_dino_module_v3 import CoTrainTransformerSS
from CoTrain.modules.clip_module import CLIP | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/__init__.py |
clip_param_keys = {
'positional_embedding',
'text_projection',
'visual.class_embedding',
'visual.positional_embedding',
'visual.conv1.weight',
'visual.ln_pre.weight',
'visual.ln_pre.bias',
'visual.transformer.resblocks.0.attn.in_proj_weight',
'visual.transformer.resblocks.0.attn.in_p... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/clip_param_keys.py |
# Code for "ActionCLIP: ActionCLIP: A New Paradigm for Action Recognition"
# arXiv:
# Mengmeng Wang, Jiazheng Xing, Yong Liu
import torch
import CoTrain.modules.InternVideo as internvideo
from CoTrain.datasets import K400VideoDataset
def text_prompt(data = K400VideoDataset.classes(), prompt_type='all'):
if promp... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/text_prompt.py |
import random
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import itertools
from torch.utils.data.distributed import DistributedSampler
import torch.distributed.nn as distnn
from einops import rearrange, repeat
from CoTra... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/objectives.py |
import os
import math
import numbers
from pathlib import Path
import ipdb
import numpy as np
import torch
import scipy.stats
from sklearn.metrics import average_precision_score
import ipdb
import pdb
def t2v_metrics(sims, query_masks=None):
"""Compute retrieval metrics from a similiarity matrix.
Args:
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/retrieval_metrics.py |
from copy import deepcopy
import torch
import torch.nn as nn
import pytorch_lightning as pl
from CoTrain.modules import heads, cotrain_utils
from CoTrain.modules import objectives as objectives
from CoTrain.modules import base_vision_transformer as vit
from CoTrain.modules.text_prompt import text_prompt
import os
impor... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/clip_module.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
import torch
_LOCAL_... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/dist_utils.py |
from requests import patch
import torch
import torch.nn as nn
from .coca import Residual, ParallelTransformerBlock, CrossAttention
from einops import repeat
import torch.utils.checkpoint as checkpoint
import numpy as np
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
def trunc_normal_(tensor, m... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/clip_decoders.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation =... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/heads.py |
from .internvideo import * | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/__init__.py |
import numbers
import random
import numpy as np
import PIL
import skimage
import skimage.transform
import torchvision
import torch
from torchvision import transforms
from PIL import Image
import torch
import cv2
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(cli... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/video_transform.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corr... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/simple_tokenizer.py |
import torch
import numpy as np
import decord
from typing import Any, OrderedDict, Union, List
from pkg_resources import packaging
from torchvision import transforms
from . import video_transform
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from .clip_utils.model import build_model
from .clip_utils imp... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/internvideo.py |
from .clip import *
| InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
from . import utils
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/model.py |
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokeni... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corr... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
import logging
logger = logging.getLogger(__name__)
MODE... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/clip_vit_only_global.py |
# from .evl_module import TransformerDecoder
from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attenti... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attenti... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/modules/InternVideo/clip_utils/utils/attention_module_bias.py |
InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/gadgets/__init__.py | |
import torch
from torchmetrics import Metric
# import torchmetrics as Metric
def order_class_index(order):
"""Return the index of the order in its full permutation.
Args:
order (tensor): e.g. [0,1,2]
"""
classes = list(itertools.permutations(list(range(len(order)))))
return classes.index(t... | InternVideo-main | Downstream/multi-modalities-downstream/CoTrain/gadgets/my_metrics.py |
from setuptools import find_packages, setup
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmaction/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return... | InternVideo-main | Downstream/Open-Set-Action-Recognition/setup.py |
# Copyright (c) Open-MMLab. All rights reserved.
__version__ = '0.9.0'
def parse_version_info(version_str):
version_info = []
for x in version_str.split('.'):
if x.isdigit():
version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/version.py |
import mmcv
from mmcv import digit_version
from .version import __version__
mmcv_minimum_version = '1.1.1'
mmcv_maximum_version = '1.3'
mmcv_version = digit_version(mmcv.__version__)
assert (digit_version(mmcv_minimum_version) <= mmcv_version
<= digit_version(mmcv_maximum_version)), \
f'MMCV=={mmcv.__ver... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/__init__.py |
from .inference import inference_recognizer, init_recognizer
from .test import multi_gpu_test, single_gpu_test, collect_results_cpu
from .train import train_model
__all__ = [
'train_model', 'init_recognizer', 'inference_recognizer', 'multi_gpu_test',
'single_gpu_test', 'collect_results_cpu'
]
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/__init__.py |
import os.path as osp
import pickle
import shutil
import tempfile
import pdb
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
def single_gpu_test(model, data_loader):
"""Test model with a single gpu.
This method tests model with a single gpu and displays test pr... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/test.py |
import copy as cp
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, OptimizerHook,
build_optimizer)
from mmcv.runner.hooks import Fp16OptimizerHook
from ..core import (DistEpochEvalHook, EpochEvalHo... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/train.py |
import os
import os.path as osp
from operator import itemgetter
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from ..datasets.pipelines import Compose
from ..models import build_recognizer
def init_recognizer(config,
checkpoint=None,
... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/apis/inference.py |
from .evaluation import * # noqa: F401, F403
from .lr import * # noqa: F401, F403
from .optimizer import * # noqa: F401, F403
from .runner import * # noqa: F401, F403
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/__init__.py |
from .copy_of_sgd import CopyOfSGD
from .tsm_optimizer_constructor import TSMOptimizerConstructor
__all__ = ['CopyOfSGD', 'TSMOptimizerConstructor']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/__init__.py |
import torch
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.utils import SyncBatchNorm, _BatchNorm, _ConvNd
@OPTIMIZER_BUILDERS.register_module()
class TSMOptimizerConstructor(DefaultOptimizerConstructor):
"""Optimizer constructor in TSM model.
This constructor builds optim... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/tsm_optimizer_constructor.py |
from mmcv.runner import OPTIMIZERS
from torch.optim import SGD
@OPTIMIZERS.register_module()
class CopyOfSGD(SGD):
"""A clone of torch.optim.SGD.
A customized optimizer could be defined like CopyOfSGD. You may derive from
built-in optimizers in torch.optim, or directly implement a new optimizer.
"""
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/optimizer/copy_of_sgd.py |
# Copyright (c) Open-MMLab. All rights reserved.
import time
import warnings
import mmcv
from mmcv.runner import EpochBasedRunner, Hook
from mmcv.runner.utils import get_host_info
def cycle(iterable):
iterator = iter(iterable)
while True:
try:
yield next(iterator)
except StopItera... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/runner/omnisource_runner.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import time
import warnings
import torch
import mmcv
from mmcv.runner import EpochBasedRunner
class AnnealingRunner(EpochBasedRunner):
def run_iter(self, data_batch, train_mode, **kwargs):
if 'a... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/runner/annealing_runner.py |
from .omnisource_runner import OmniSourceDistSamplerSeedHook, OmniSourceRunner
from .annealing_runner import AnnealingRunner
__all__ = ['OmniSourceRunner', 'OmniSourceDistSamplerSeedHook', 'AnnealingRunner']
| InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/runner/__init__.py |
import os.path as osp
import warnings
from math import inf
import mmcv
from mmcv.runner import Hook
from torch.utils.data import DataLoader
from mmaction.utils import get_root_logger
class EpochEvalHook(Hook):
"""Non-Distributed evaluation hook based on epochs.
Notes:
If new arguments are added for... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_hooks.py |
from .accuracy import (average_precision_at_temporal_iou,
average_recall_at_avg_proposals, confusion_matrix,
get_weighted_score, interpolated_precision_recall,
mean_average_precision, mean_class_accuracy,
mmit_mean_average_preci... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/__init__.py |
import numpy as np
def confusion_matrix(y_pred, y_real, normalize=None):
"""Compute confusion matrix.
Args:
y_pred (list[int] | np.ndarray[int]): Prediction labels.
y_real (list[int] | np.ndarray[int]): Ground truth labels.
normalize (str | None): Normalizes confusion matrix over the ... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/accuracy.py |
import json
import numpy as np
from mmcv.utils import print_log
from ...utils import get_root_logger
from .accuracy import interpolated_precision_recall, pairwise_temporal_iou
class ActivityNetDetection:
"""Class to evaluate detection results on ActivityNet.
Args:
ground_truth_filename (str | None)... | InternVideo-main | Downstream/Open-Set-Action-Recognition/mmaction/core/evaluation/eval_detection.py |