python_code (string, lengths 0–992k) | repo_name (string, lengths 8–46) | file_path (string, lengths 5–162) |
|---|---|---|
import torch
from setuptools import setup, Extension
from torch.utils.cpp_extension import BuildExtension, CppExtension
setup(
name='nms_1d_cpu',
ext_modules=[
CppExtension(
name = 'nms_1d_cpu',
sources = ['./csrc/nms_cpu.cpp'],
extra_compile_args=['-fopenmp']
... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/utils/setup.py |
import os
import shutil
import time
import json
import pickle
from typing import Dict
import numpy as np
import pdb
import torch
from scipy.special import softmax
from .metrics import ANETdetection
# def load_results_from_pkl(filename):
# # load from pickle file
# assert os.path.isfile(filename)
# with o... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/utils/postprocessing.py |
import os
# backbone (e.g., conv / transformer)
backbones = {}
def register_backbone(name):
def decorator(cls):
backbones[name] = cls
return cls
return decorator
# neck (e.g., FPN)
necks = {}
def register_neck(name):
def decorator(cls):
necks[name] = cls
return cls
retu... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/models.py |
import math
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_meta_arch, make_backbone, make_neck, make_generator
from .blocks import MaskedConv1D, Scale, LayerNorm
from .losses import ctr_diou_loss_1d, sigmoid_focal_loss
from ..utils import batched_nms
class PtTra... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/meta_archs.py |
from .blocks import (MaskedConv1D, MaskedMHCA, MaskedMHA, LayerNorm,
TransformerBlock, ConvBlock, Scale, AffineDropPath)
from .models import make_backbone, make_neck, make_meta_arch, make_generator
from . import backbones # backbones
from . import necks # necks
from . import loc_gener... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/__init__.py |
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_backbone
from .blocks import (get_sinusoid_encoding, TransformerBlock, MaskedConv1D,
ConvBlock, LayerNorm)
@register_backbone("convTransformer")
class ConvTransformerBackbone(nn.Module):
"""
... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/backbones.py |
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_neck
from .blocks import MaskedConv1D, LayerNorm
@register_neck("fpn")
class FPN1D(nn.Module):
"""
Feature pyramid network
"""
def __init__(
self,
in_channels, # input feature c... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/necks.py |
import torch
from torch.nn import functional as F
@torch.jit.script
def sigmoid_focal_loss(
inputs: torch.Tensor,
targets: torch.Tensor,
alpha: float = 0.25,
gamma: float = 2.0,
reduction: str = "none",
) -> torch.Tensor:
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/losses.py |
import torch
from torch import nn
from torch.nn import functional as F
from .models import register_generator
class BufferList(nn.Module):
"""
Similar to nn.ParameterList, but for buffers
Taken from https://github.com/facebookresearch/detectron2/blob/master/detectron2/modeling/anchor_generator.py
""... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/loc_generators.py |
import math
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .weight_init import trunc_normal_
class MaskedConv1D(nn.Module):
"""
Masked 1D convolution. Interface remains the same as Conv1d.
Only support a sub set of 1d convs
"""
def __init__(
self... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/blocks.py |
# from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/weight_init.py
import torch
import math
import warnings
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://p... | InternVideo-main | Downstream/Temporal-Action-Localization/libs/modeling/weight_init.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/engine_for_finetuning.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/masking_generator.py |
# -*- coding: utf-8 -*-
import argparse
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# http... | InternVideo-main | Pretrain/VideoMAE/run_mae_vis.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/transforms.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/engine_for_pretraining.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/modeling_pretrain.py |
import io
import os
import random
import cv2
import decord
import numpy as np
import torch
from decord import VideoReader, cpu
from petrel_client.client import Client
from PIL import Image
class HybridVideoMAE(torch.utils.data.Dataset):
"""Load your own video classification dataset.
Parameters
----------... | InternVideo-main | Pretrain/VideoMAE/mae.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/datasets.py |
import os
import warnings
import cv2
import numpy as np
import torch
from decord import VideoReader, cpu
from petrel_client.client import Client
from torch.utils.data import Dataset
from torchvision import transforms
import video_transforms as video_transforms
import volume_transforms as volume_transforms
from random... | InternVideo-main | Pretrain/VideoMAE/ssv2.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py
published under an Apache License 2.0.
COMMENT FROM ORIGINAL:
AutoAugment, RandAugment, and AugMix for PyTorch
This code im... | InternVideo-main | Pretrain/VideoMAE/rand_augment.py |
import numpy as np
import torch
from PIL import Image
def convert_img(img):
"""Converts (H, W, C) numpy.ndarray to (C, W, H) format
"""
if len(img.shape) == 3:
img = img.transpose(2, 0, 1)
if len(img.shape) == 2:
img = np.expand_dims(img, 0)
return img
class ClipToTensor(object):... | InternVideo-main | Pretrain/VideoMAE/volume_transforms.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/run_class_finetuning.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/run_mae_pretraining.py |
import numbers
import cv2
import numpy as np
import PIL
import torch
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(clip, min_h, min_w, h, w):
if isinstance(clip[0], np.ndarray):
cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
... | InternVideo-main | Pretrain/VideoMAE/functional.py |
# --------------------------------------------------------
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookre... | InternVideo-main | Pretrain/VideoMAE/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from functools import partial, reduce
from operator import mul
import torch
import torch.nn as nn
from timm.mod... | InternVideo-main | Pretrain/VideoMAE/vits.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/run_class_linear.py |
import os
import numpy as np
from matplotlib import use
from scipy.special import softmax
def merge(eval_paths, num_tasks, use_softmax=False):
dict_feats = {}
dict_label = {}
print("Reading individual output files")
if not isinstance(eval_paths, list):
eval_paths = [eval_paths]
for eval... | InternVideo-main | Pretrain/VideoMAE/ensemble.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
This implementation is based on
https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py
published under an Apache License 2.0.
COMMENT FROM ORIGINAL:
Originally inspired by impl at https://github.com/zhunzhong... | InternVideo-main | Pretrain/VideoMAE/random_erasing.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import math
import numbers
# import cv2
import random
import numpy as np
import PIL
import torch
import torchvision
import torchvision.transforms.functional as F
from PIL import Image
from torchvision import transforms
im... | InternVideo-main | Pretrain/VideoMAE/video_transforms.py |
# pylint: disable=line-too-long,too-many-lines,missing-docstring
import io
import os
import random
import warnings
import cv2
import decord
import numpy as np
import torch
import torch.distributed as dist
from decord import VideoReader, cpu
from numpy.lib.function_base import disp
from petrel_client.client import Clie... | InternVideo-main | Pretrain/VideoMAE/anet.py |
# pylint: disable=line-too-long,too-many-lines,missing-docstring
import io
import os
import random
import warnings
import cv2
import decord
import numpy as np
import torch
from decord import VideoReader, cpu
from petrel_client.client import Client
from PIL import Image
from torch.utils.data import Dataset
from torchvi... | InternVideo-main | Pretrain/VideoMAE/kinetics.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/modeling_finetune.py |
# --------------------------------------------------------
# Based on BEiT, timm, DINO and DeiT code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --... | InternVideo-main | Pretrain/VideoMAE/optim_factory.py |
import torch
import InternVideo
text_cand = ["an airplane is taking off", "an airplane is flying", "a dog is chasing a ball"]
video = InternVideo.load_video("./data/demo.mp4").cuda()
model = InternVideo.load_model("./models/InternVideo-MM-L-14.ckpt").cuda()
text = InternVideo.tokenize(
text_cand
).cuda()
with to... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/demo.py |
from .internvideo import * | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/__init__.py |
import numbers
import random
import numpy as np
import PIL
import skimage
import skimage.transform
import torchvision
import torch
from torchvision import transforms
from PIL import Image
import torch
import cv2
def _is_tensor_clip(clip):
return torch.is_tensor(clip) and clip.ndimension() == 4
def crop_clip(cli... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/video_transform.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corr... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/simple_tokenizer.py |
import torch
import numpy as np
import decord
from typing import Any, OrderedDict, Union, List
from pkg_resources import packaging
from torchvision import transforms
from . import video_transform
from .simple_tokenizer import SimpleTokenizer as _Tokenizer
from .clip_utils.model import build_model
__all__ = ["load_m... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/internvideo.py |
from .clip import *
| InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/__init__.py |
from collections import OrderedDict
from typing import Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint_sequential
from . import utils
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/model.py |
import hashlib
import os
import urllib
import warnings
from typing import Any, Union, List
from pkg_resources import packaging
import torch
from PIL import Image
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from tqdm import tqdm
from .model import build_model
from .simple_tokeni... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/clip.py |
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corr... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/simple_tokenizer.py |
#!/usr/bin/env python
import warnings
from typing import Tuple, Optional
import torch
from torch import Tensor
from torch.nn.modules.linear import Linear
from torch.nn.init import xavier_uniform_
from torch.nn.init import constant_
from torch.nn.init import xavier_normal_
from torch.nn.parameter import Parameter
from... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention.py |
#!/usr/bin/env python
import os
from collections import OrderedDict
from timm.models.layers import DropPath
import torch
from torch import nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .attention import MultiheadAttention
import logging
logger = logging.getLogger(__name__)
MODE... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/clip_vit_only_global.py |
# from .evl_module import TransformerDecoder
from .clip_vit_only_global import vit_only_global_b32, vit_only_global_b16, vit_only_global_l14, vit_only_global_l14_336 | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/__init__.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attenti... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module.py |
r"""Functional interface"""
import warnings
import math
import torch
from torch import _VF
from torch._jit_internal import Optional, Tuple
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout
Tensor = torch.Tensor
def multi_head_attenti... | InternVideo-main | Pretrain/Multi-Modalities-Pretraining/InternVideo/clip_utils/utils/attention_module_bias.py |
from TerraByte.model.terrabyte_triton import TerraByteTriton as TerraByte
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_... | TerraByte-master | train_triton.py |
from setuptools import setup, find_packages
setup(
name = 'TerraByte',
packages = find_packages(),
version = '0.1.5',
license='MIT',
description = 'TerraByte - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Kye Gomez',
author_email = 'kye@apac.ai',
url = 'https://github.com/kyeg... | TerraByte-master | setup.py |
import torch
from TerraByte import TerraByte
model = TerraByte(
num_tokens = 16000,
dim = (512, 256),
dim_head=64,
dilation_rate=4,
segment_size=2,
max_seq_len = (1024, 4),
depth = (6, 4),
dim_head = 64,
heads = 8,
)
x = torch.randint(0, 16000, (1, 1024,... | TerraByte-master | example.py |
from TerraByte import TerraByte
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 2e-4
... | TerraByte-master | train.py |
import unittest
import torch
from torch.nn import Dropout
from torch import einsum
from torch import nn
from torch.testing import assert_allclose
from TerraByte.model.attend import Attend, FlashAttention, EfficientAttentionConfig
class TestAttending(unittest.TestCase):
def setUp(self):
self.attend = Att... | TerraByte-master | testing/attention.py |
from TerraByte.model.model import TerraByte
import torch
class TerraByte:
def __init__(self,
num_tokens = 16000,
dim = (512, 256),
dilation_rate=4,
segment_size=2,
max_seq_len = (1024, 4),
depth = (6, 4), ... | TerraByte-master | TerraByte/terrabyte.py |
from TerraByte.model.model import TerraByte
| TerraByte-master | TerraByte/__init__.py |
TerraByte-master | TerraByte/training/__init__.py | |
import math
import multiprocessing
import os
from datetime import timedelta
from functools import partial
from itertools import chain
import torch
from accelerate import Accelerator
from accelerate.utils import DummyOptim, DummyScheduler, InitProcessGroupKwargs
from datasets import load_dataset
from lion_pytorch impor... | TerraByte-master | TerraByte/training/train.py |
import torch
# This is the unfused version of StableAdamW. It is slower than the fused version (coming).
class StableAdamWUnfused(torch.optim.Optimizer):
def __init__(
self,
params,
lr=0.002,
weight_decay=0.2,
betas=(0.9, 0.99),
eps=1e-8,
clip_thresh=1.0,
... | TerraByte-master | TerraByte/utils/stable_adamw.py |
TerraByte-master | TerraByte/utils/__init__.py | |
import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional, Tuple
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from TerraByte.... | TerraByte-master | TerraByte/model/multimodal_preprocessor.py |
import torch.nn as nn
from einops import rearrange
from TerraByte.model.attend import Attend
from TerraByte.model.helpers import RMSNorm, apply_rotary_pos_emb, exists
############## ATTENTION
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,... | TerraByte-master | TerraByte/model/attention.py |
# class Transformer(nn.Module):
# def __init__(
# self,
# *,
# dim,
# layers,
# dim_head = 64,
# heads = 8,
# attn_dropout = 0.,
# ff_dropout = 0.,
# ff_mult = 4,
# rel_pos_bias = True,
# flash_attn = True,
# ):
# su... | TerraByte-master | TerraByte/model/transformer_alibi.py |
TerraByte-master | TerraByte/model/__init__.py | |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraBy... | TerraByte-master | TerraByte/model/model.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_me... | TerraByte-master | TerraByte/model/attend.py |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraBy... | TerraByte-master | TerraByte/model/megabyte.py |
from typing import Tuple
import torch
from beartype.typing import Tuple
from einops import rearrange
from einops.layers.torch import Rearrange
from torch import Tensor, nn
class PatchEmbeddings(nn.Module):
def __init__(self, dim_in, dim_out, seq_len):
super().__init__()
self.embedding = nn.Sequ... | TerraByte-master | TerraByte/model/patches.py |
import torch
import triton
import triton.language as tl
@triton.jit
def max_fn(x, y):
return tl.math.max(x, y)
@triton.jit
def _fwd_kernel(
Q, K, V, sm_scale,
L,
Out,
stride_qz, stride_qh, stride_qm, stride_qk,
stride_kz, stride_kh, stride_kn, stride_kk,
stride_vz, stride_vh, stride_vk,... | TerraByte-master | TerraByte/model/attention_triton.py |
import torch.nn as nn
from TerraByte.model.helpers import RotaryEmbedding, FeedForward, RMSNorm, token_shift, exists
from TerraByte.model.attention import Attention
class Transformer(nn.Module):
def __init__(
self,
*,
dim,
layers,
dim_head = 64,
heads = 8,
at... | TerraByte-master | TerraByte/model/transformer.py |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm import tqdm
from TerraByt... | TerraByte-master | TerraByte/model/omnibyte.py |
from itertools import zip_longest
from typing import Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from beartype import beartype
from beartype.typing import Tuple, Union
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from torch import nn
from tqdm impor... | TerraByte-master | TerraByte/model/terrabyte_triton.py |
import math
import torch
import triton
import triton.language as tl
# Disabling autotune for now, set num_warps=4 if headdim=64 and num_warps=8 if headdim=128
# @triton.autotune(
# configs=[
# triton.Config({"BLOCK_M": 128, "BLOCK_N": 128}, num_warps=4, num_stages=1),
# # This config has a race... | TerraByte-master | TerraByte/model/flash_triton.py |
import functools
import math
import einops
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def pack_... | TerraByte-master | TerraByte/model/helpers.py |
def main():
print("Welcome!Input the number of hours on Earth so you can see how many hours pass by on Europa")
userInput = int(input("How many Europa days go by for every x Earth day"))
Europa = userInput * 3.551 #hours for 1 day in Europa 85.224
print(f"{userInput} days on Earth is {Eur... | 601-daysthatgobyonEuropa-main | 601_assignment.py |
import os
# from tree_of_thoughts.openaiModels import OpenAILanguageModel
# from tree_of_thoughts.treeofthoughts import TreeofThoughts
from meta_tree_of_thoughts.treeofthoughts import TreeofThoughts, MonteCarloTreeofThoughts
from meta_tree_of_thoughts.thinkingAgent import ThinkingAgent
from meta_tree_of_thoughts.openai... | Meta-Tree-Of-Thoughts-main | example.py |
import os
import time
import json
import logging
import argparse
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
from typing import Any, Dict, List, Optional, Set, Tuple, Union
from meta_tree_of_thoughts.thinkingAgent import ThinkingAgent... | Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/treeofthoughts.py |
from abc import ABC, abstractmethod
import random
from meta_tree_of_thoughts.metaAgent import MetaAgent
class AbstractLanguageModel(ABC):
@abstractmethod
def generate_text(self, prompt):
pass
class ThinkingAgent:
def __init__(self, model: AbstractLanguageModel, strategy="cot", evaluation_strategy... | Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/thinkingAgent.py |
from abc import ABC, abstractmethod
import openai
import langchain
from dotenv import load_dotenv
from langchain import OpenAI, LLMChain, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferWindowMemory
load_dotenv()
#tree of thoughts
class MetaAgent():
de... | Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/metaAgent.py |
import os
import openai
import time
import concurrent.futures
from abc import ABC, abstractmethod
class OpenAILanguageModel():
def __init__(self, api_key, strategy="cot", evaluation_strategy="value", api_model="", enable_ReAct_prompting=True):
if api_key == "" or api_key == None:
api_key = o... | Meta-Tree-Of-Thoughts-main | meta_tree_of_thoughts/openaiModel.py |
import torch
from starlight_vision import Starlight
# Example of usage:
model = Starlight()
texts = [
'a whale breaching from afar',
'young girl blowing out candles on her birthday cake',
'fireworks with blue and green sparkles',
'dust motes swirling in the morning sunshine on the windowsill'
]
video... | StarlightVision-master | example.py |
from starlight_vision.model import Starlight | StarlightVision-master | starlight_vision/__init__.py |
from starlight_vision import Unet3D, ElucidatedStarlight, StarlightTrainer
class Starlight:
def __init__(self,
dim=64,
dim_mults=(1, 2, 4, 8),
image_sizes=(16, 32),
random_crop_sizes=(None, 16),
temporal_downsample_factor=(2, 1... | StarlightVision-master | starlight_vision/model.py |
import os
from collections.abc import Iterable
from contextlib import contextmanager, nullcontext
from functools import partial, wraps
from math import ceil
import numpy as np
import pytorch_warmup as warmup
import torch
import torch.nn.functional as F
from accelerate import Accelerator, DistributedDataParallelKwargs,... | StarlightVision-master | starlight_vision/trainer.py |
from functools import wraps
from packaging import version
from collections import namedtuple
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
AttentionConfig = namedtuple('AttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# ... | StarlightVision-master | starlight_vision/core/attention.py |
from typing import List
import torch
import transformers
from einops import rearrange
from transformers import T5Config, T5EncoderModel, T5Tokenizer
transformers.logging.set_verbosity_error()
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if ca... | StarlightVision-master | starlight_vision/core/t5.py |
from math import sqrt
from random import random
from functools import partial
from contextlib import contextmanager, nullcontext
from typing import List, Union
from collections import namedtuple
from tqdm.auto import tqdm
import torch
import torch.nn.functional as F
from torch import nn, einsum
from torch.cuda.amp imp... | StarlightVision-master | starlight_vision/core/elucidated.py |
StarlightVision-master | starlight_vision/core/__init__.py | |
import torch
import torch.nn as nn
from torchvision.transforms import Compose, Resize, Normalize, ToTensor
from torch.utils.data import DataLoader
from transformers import DiffusionModel, ClipModel, DiffusionConfig, DPTImageProcessor, DPTForDepthEstimation
from torchvision.transforms import GaussianBlur
import torch.... | StarlightVision-master | starlight_vision/core/starlightv2.py |
import math
import copy
from random import random
from beartype.typing import List, Union
from beartype import beartype
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
import torch
import torc... | StarlightVision-master | starlight_vision/core/gen2.py |
import torch
import torch.nn as nn
from torchvision.transforms import Compose, Resize, Normalize, ToTensor
from torch.utils.data import DataLoader
from transformers import DiffusionModel, ClipModel, DiffusionConfig, DPTImageProcessor, DPTForDepthEstimation
from torchvision.transforms import GaussianBlur
import torch.... | StarlightVision-master | starlight_vision/core/starlight.py |
import math
import copy
import operator
import functools
from typing import List
from tqdm.auto import tqdm
from functools import partial, wraps
from contextlib import contextmanager, nullcontext
from collections import namedtuple
from pathlib import Path
import torch
import torch.nn.functional as F
from torch import ... | StarlightVision-master | starlight_vision/core/gen2_video.py |
from setuptools import setup, find_packages
setup(
name = 'VisualNexus',
packages = find_packages(exclude=['examples']),
version = '0.0.1',
license='MIT',
description = 'VisualNexus - Pytorch',
author = 'Kye Gomez',
author_email = 'kye@apac.ai',
url = 'https://github.com/kyegomez/VisualNexus',
long_d... | VisualNexus-master | setup.py |
from datasets import Dataset
import pandas as pd
from models.sag_img import SAG_IMG
from models.sag_video import SAG_VID
import os
from datasets import load_dataset
def load_hf_dataset(dataset_name):
#custom logic
pass
class SAG_MEDIA:
"""
SAG_MEDIA: Segment Anything for Image and Video.
This ... | VisualNexus-master | VisualNexus/models/sag_both.py |
from datasets import load_dataset
from metaseq import SegAutoMaskPredictor
import os
from datasets import Dataset
import pandas as pd
class SAG_VID:
def __init__(self, model_type='vit_1', points_per_side=16, points_per_batch=64, min_area=1000, output_dir='./output'):
"""
Segment anything for... | VisualNexus-master | VisualNexus/models/sag_video.py |
import os
import pandas as pd
from pathlib import Path
from datasets import Dataset
from mobile_sam import SamAutomaticMaskGenerator
import numpy as np
class MobileSAM:
def __init__(self, img_path: str, output: str, hf_dataset, text_prompt=None):
self.img_path = img_path
self.output = output
... | VisualNexus-master | VisualNexus/models/mobile_sam.py |
from VisualNexus.models.sag_img import SAG_IMG
from VisualNexus.models.sag_video import SAG_VID
| VisualNexus-master | VisualNexus/models/__init__.py |
import os
from pathlib import Path
from ultralytics import YOLO
from FastSAM.utils.tools import fast_process, convert_box_xywh_to_xyxy, format_results, box_prompt, point_prompt, text_prompt
import ast
import torch
import cv2
import numpy as np
from datasets import load_dataset
from pathlib import Path
import os
from ... | VisualNexus-master | VisualNexus/models/sag_img.py |
from ultralytics import YOLO
import gradio as gr
import torch
from utils.tools_gradio import fast_process, format_results, box_prompt, point_prompt
from PIL import ImageDraw
import numpy as np
# Load the pre-trained model
model = YOLO('./weights/FastSAM.pt')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# ... | VisualNexus-master | VisualNexus/models/FastSAM/app_gradio.py |
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
# Thanks for chenxwh.
import argparse
import cv2
import shutil
import ast
from cog import BasePredictor, Input, Path
from ultralytics import YOLO
from utils.tools import *
class Predictor(BasePredictor):
def setup(self)... | VisualNexus-master | VisualNexus/models/FastSAM/predict.py |
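
Each row above pairs a (truncated) python_code string with its repo_name and file_path. A minimal sketch of how a three-column dataset with this schema could be loaded and filtered using the Hugging Face datasets library is shown below; the local file name `code_dataset.jsonl` is a hypothetical export, not something provided by this page.

```python
# Minimal sketch: load a code dataset with columns
# (python_code, repo_name, file_path) and filter it by repository.
# "code_dataset.jsonl" is a hypothetical local export of the rows above.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_dataset.jsonl", split="train")

# Keep only rows from one repo and directory prefix seen in the preview,
# e.g. the VideoMAE pretraining files under InternVideo-main.
videomae = ds.filter(
    lambda row: row["repo_name"] == "InternVideo-main"
    and row["file_path"].startswith("Pretrain/VideoMAE/")
)

# Print a few file paths and the size of each python_code cell.
for row in videomae.select(range(min(3, len(videomae)))):
    print(row["file_path"], len(row["python_code"]), "chars")
```

The same filter pattern applies to any repo_name or file_path prefix that appears in the preview table.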