| python_code (string, 0–992k chars) | repo_name (string, 8–46 chars) | file_path (string, 5–162 chars) |
|---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import logging
import os
from omegaconf import OmegaConf
import dinov2.distributed as distributed
from dino... | EXA-1-master | exa/models/dinov2/dinov2/utils/config.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Union
import numpy as np
import torch
TypeSpec = Union[str, np.dtype, torch.dtype]
_NUMPY_... | EXA-1-master | exa/models/dinov2/dinov2/utils/dtype.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/dinov2/dinov2/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import subprocess
from urllib.parse import urlparse
import numpy as np
import tor... | EXA-1-master | exa/models/dinov2/dinov2/utils/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import os
from pathlib import Path
from typing import Any, Dict, Optional
class ClusterType(Enum)... | EXA-1-master | exa/models/dinov2/dinov2/utils/cluster.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import logging
logger = logging.getLogger("dinov2")
def get_vit_lr_decay_rate(nam... | EXA-1-master | exa/models/dinov2/dinov2/utils/param_groups.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# References:
# https://github.com/facebookresearch/dino/blob/main/vision_transformer.py
# https://github.com/rwightm... | EXA-1-master | exa/models/dinov2/dinov2/models/vision_transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from . import vision_transformer as vits
logger = logging.getLogger("dinov2")
def build_model(args, o... | EXA-1-master | exa/models/dinov2/dinov2/models/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any
import torch
import dinov2.distributed as distributed
from functools import partial
fro... | EXA-1-master | exa/models/dinov2/dinov2/fsdp/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
from omegaconf import OmegaConf
def load_config(config_name: str):
config_filename = config_name + ... | EXA-1-master | exa/models/dinov2/dinov2/configs/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .train import get_args_parser, main
from .ssl_meta_arch import SSLMetaArch
| EXA-1-master | exa/models/dinov2/dinov2/train/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from functools import partial
import logging
import torch
from torch import nn
from dinov2.loss import DINOLoss, iBOTPa... | EXA-1-master | exa/models/dinov2/dinov2/train/ssl_meta_arch.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import math
import os
from functools import partial
from fvcore.common.checkpoint import ... | EXA-1-master | exa/models/dinov2/dinov2/train/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
import logging
from typing import Any, Dict, Optional
import torch
from torch import Tensor
from t... | EXA-1-master | exa/models/dinov2/dinov2/eval/metrics.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from functools import partial
import json
import logging
import os
import sys
from typing import List, Op... | EXA-1-master | exa/models/dinov2/dinov2/eval/linear.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import gc
import logging
import sys
import time
from typing import List, Optional
from cuml.linear_model... | EXA-1-master | exa/models/dinov2/dinov2/eval/log_regression.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/dinov2/dinov2/eval/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Any, List, Optional, Tuple
import torch
import torch.backends.cudnn as cudnn
from di... | EXA-1-master | exa/models/dinov2/dinov2/eval/setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Dict, Optional
import torch
from torch import nn
from torchmetrics import MetricCollec... | EXA-1-master | exa/models/dinov2/dinov2/eval/utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from functools import partial
import json
import logging
import os
import sys
from typing import List, Op... | EXA-1-master | exa/models/dinov2/dinov2/eval/knn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import os
from pathlib import Path
from typing import List, Optional
import submitit
fro... | EXA-1-master | exa/models/dinov2/dinov2/run/submit.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/dinov2/dinov2/run/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.logging import setup_logging
from dinov2.train import get_args_parser as... | EXA-1-master | exa/models/dinov2/dinov2/run/train/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.eval.linear import get_args_parser as get_linear_args_parser
from dinov2... | EXA-1-master | exa/models/dinov2/dinov2/run/eval/linear.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.eval.log_regression import get_args_parser as get_log_regression_args_pa... | EXA-1-master | exa/models/dinov2/dinov2/run/eval/log_regression.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
from dinov2.eval.knn import get_args_parser as get_knn_args_parser
from dinov2.loggi... | EXA-1-master | exa/models/dinov2/dinov2/run/eval/knn.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Sequence
import torch
from torchvision import transforms
class GaussianBlur(transforms.RandomApply)... | EXA-1-master | exa/models/dinov2/dinov2/data/transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import random
def collate_data_and_cast(samples_list, mask_ratio_tuple, mask_probability, dtype, n_tokens=... | EXA-1-master | exa/models/dinov2/dinov2/data/collate.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import Enum
from typing import Any, Callable, List, Optional, TypeVar
import torch
from torch.u... | EXA-1-master | exa/models/dinov2/dinov2/data/loaders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .adapters import DatasetWithEnumeratedTargets
from .loaders import make_data_loader, make_dataset, SamplerType
from ... | EXA-1-master | exa/models/dinov2/dinov2/data/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import random
import math
import numpy as np
class MaskingGenerator:
def __init__(
self,
input_size... | EXA-1-master | exa/models/dinov2/dinov2/data/masking.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
from typing import Any, Optional
import warnings
import numpy as np
import torch
from torch.utils.data.... | EXA-1-master | exa/models/dinov2/dinov2/data/samplers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from torchvision import transforms
from .transforms import (
GaussianBlur,
make_normalize_transf... | EXA-1-master | exa/models/dinov2/dinov2/data/augmentations.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple
from torch.utils.data import Dataset
class DatasetWithEnumeratedTargets(Dataset):
de... | EXA-1-master | exa/models/dinov2/dinov2/data/adapters.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import csv
from enum import Enum
import os
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
... | EXA-1-master | exa/models/dinov2/dinov2/data/datasets/image_net.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
from typing import Any, Tuple
from PIL import Image
class Decoder:
def decode(self) -> Any:... | EXA-1-master | exa/models/dinov2/dinov2/data/datasets/decoders.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Tuple
from torchvision.datasets import VisionDataset
from .decoders import Decoder, TargetDecod... | EXA-1-master | exa/models/dinov2/dinov2/data/datasets/extended.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .image_net import ImageNet
from .image_net_22k import ImageNet22k
| EXA-1-master | exa/models/dinov2/dinov2/data/datasets/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum
from functools import lru_cache
from gzip import GzipFile
from io... | EXA-1-master | exa/models/dinov2/dinov2/data/datasets/image_net_22k.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import os
import sys
from typing import Optional
import dinov2.distributed as distribute... | EXA-1-master | exa/models/dinov2/dinov2/logging/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict, deque
import datetime
import json
import logging
import time
import torch
import d... | EXA-1-master | exa/models/dinov2/dinov2/logging/helpers.py |
import torch
import torch.nn as nn
import math
from scipy.special import gamma as scipy_gamma
import functools
import torch.jit
def gamma(n):
return math.gamma(n)
@torch.jit.script
# def caputo_approximation(x, base_activation, derivative_order, h, n):
# k = torch.arange(n).float()
# x_expanded = x.view(-... | EXA-1-master | exa/modular_components/activations/neox/neox5.py |
import torch
import torch.nn as nn
import math
from scipy.special import gamma as scipy_gamma
import math
import functools
# @functools.lru_cache(maxsize=None)
# def memoized_base_activation(x):
# return base_activation(x)
def gamma(n):
return math.gamma(n)
# def caputo_approximation(x, base_activation, de... | EXA-1-master | exa/modular_components/activations/neox/neox4.py |
import torch
import torch.nn as nn
from neox2 import SimplifiedOptimizedFractionalActivation
# import torch
# import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset, random_split
# from neox3 import CaputoFractionalActivation
# from neox4 import CaputoFractionalActivati... | EXA-1-master | exa/modular_components/activations/neox/test.py |
import torch
import torch.nn as nn
def fractional_derivative(x, base_activation, derivative_order, h=1e-5):
# Apply base activation function on x
base = base_activation(x)
# Apply base activation function on x + h
base_plus_h = base_activation(x + h)
# Compute the fractional derivative using Grue... | EXA-1-master | exa/modular_components/activations/neox/neox.py |
import torch
import torch.nn as nn
import math
import torch.jit
def relu_activation(x):
return torch.relu(x)
@torch.jit.script
def caputo_approximation(x, derivative_order, h, n):
k = torch.arange(n).float().view(1, -1)
x_expanded = x.view(-1, 1)
h_expanded = h.view(-1, 1)
factorial_k = torch... | EXA-1-master | exa/modular_components/activations/neox/neo7.py |
import torch
import torch.nn as nn
import math
def gamma(n):
return math.gamma(n)
def caputo_approximation(x, base_activation, derivative_order, h, n):
sum_terms = 0.0
for k in range(n):
term = ((-1)**k) * gamma(derivative_order + k + 1) / (math.factorial(k) * gamma(derivative_order + 1)) * (base_... | EXA-1-master | exa/modular_components/activations/neox/neox3.py |
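The `term` line above is cut off after `(base_`. For context, here is a self-contained sketch of a truncated-series Caputo-style approximation using the same coefficient; the shifted-evaluation term `base_activation(x + k * h)` and the `h ** derivative_order` normalization are assumptions, not the file's actual tail:

```python
import math
import torch

def caputo_approximation(x, base_activation, derivative_order, h=1e-4, n=10):
    # series coefficient copied from the visible line above; the difference
    # term and the final normalization are assumptions (original is truncated)
    total = torch.zeros_like(x)
    for k in range(n):
        coeff = ((-1) ** k) * math.gamma(derivative_order + k + 1) / (
            math.factorial(k) * math.gamma(derivative_order + 1)
        )
        total = total + coeff * base_activation(x + k * h)
    return total / (h ** derivative_order)

out = caputo_approximation(torch.randn(4), torch.relu, derivative_order=0.5)
```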
import torch
import torch.nn as nn
import math
from scipy.special import gamma as scipy_gamma
import functools
import torch.jit
def gamma(n):
return math.gamma(n)
def relu_activation(x):
return torch.relu(x)
def factorial_tensor(tensor):
return torch.tensor([math.factorial(int(k_i)) for k_i in tensor])... | EXA-1-master | exa/modular_components/activations/neox/neox6.py |
import torch
import torch.nn as nn
def fractional_derivative(x, base_activation, derivative_order, h):
base = base_activation(x)
base_plus_h = base_activation(x + h)
fractional_derivative = ((base_plus_h - base) / h) ** derivative_order
return fractional_derivative
class SimplifiedOptimizedFractionalA... | EXA-1-master | exa/modular_components/activations/neox/neox2.py |
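The class name above is cut off. A minimal runnable sketch of how such a module would wrap the visible `fractional_derivative`; the class body is an assumption, only the function and the class name appear in the row:

```python
import torch
import torch.nn as nn

def fractional_derivative(x, base_activation, derivative_order, h=1e-5):
    # finite-difference quotient of the base activation, raised to a
    # fractional power (copied from the visible lines above)
    base = base_activation(x)
    base_plus_h = base_activation(x + h)
    return ((base_plus_h - base) / h) ** derivative_order

class SimplifiedOptimizedFractionalActivation(nn.Module):
    # hypothetical wrapper; only the class name is visible in the row above
    def __init__(self, base_activation=torch.relu, derivative_order=0.5, h=1e-5):
        super().__init__()
        self.base_activation = base_activation
        self.derivative_order = derivative_order
        self.h = h

    def forward(self, x):
        return fractional_derivative(x, self.base_activation, self.derivative_order, self.h)
```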
import cmath
import math
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** ... | EXA-1-master | exa/modular_components/activations/KNOTX/v4.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import asyncio
import concurrent.futures
# Set the default device to use GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def lorenz_ode(x0, y0, z0, sigma=10, rho=28, beta=8/3, dt=0.01, steps=1000):
x, y, z... | EXA-1-master | exa/modular_components/activations/KNOTX/clean.py |
import cmath
import math
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x * (... | EXA-1-master | exa/modular_components/activations/KNOTX/v5.py |
#who fucking knows if this code will work
import torch
import numpy as np
from scipy.integrate import solve_ivp
# Define the knot_invariant function
def knot_invariant(x):
# Convert the input value x into a knot representation
def knot_representation(x):
return x * 2
# Calculate the knot invaria... | EXA-1-master | exa/modular_components/activations/KNOTX/knotx.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
#define the knot invariant function
def knot_invariant(x):
#convert the input value x into a knot representation
def knot_representation(x):
return x * 2
#calculate the knot invariant using a specific knot invariant algorit... | EXA-1-master | exa/modular_components/activations/KNOTX/knotxv2.py |
# import torch
# import numpy as np
# from scipy.integrate import solve_ivp
# import torch.optim
# import sympy as sp
# import torch.nn as nn
# import torch.utils.data
# import concurrent.futures
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# def lorenz_system(t, state, sigma, rho, beta):
#... | EXA-1-master | exa/modular_components/activations/KNOTX/working.py |
import numpy as np
import torch
from scipy.integrate import solve_ivp
def convert_to_knot_representation(x):
m = int(np.ceil(x))
n = m + 1
return (m, n)
def lorenz_system(t, state, sigma=10, rho=28, beta=8/3):
    x, y, z = state
dxdt = sigma * (y - x)
dydt = x * (rho - z) - y
dzdt = x * y - beta * z
... | EXA-1-master | exa/modular_components/activations/KNOTX/knotODE.py |
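The Lorenz right-hand side above recurs throughout this directory. A quick self-contained usage sketch showing how `solve_ivp` integrates it (the initial condition and time horizon are arbitrary choices for illustration):

```python
import numpy as np
from scipy.integrate import solve_ivp

def lorenz_system(t, state, sigma=10, rho=28, beta=8/3):
    x, y, z = state
    return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]

sol = solve_ivp(lorenz_system, (0.0, 10.0), [1.0, 1.0, 1.0])
print(sol.y[:, -1])  # final (x, y, z) point on the attractor
```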
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
import asyncio
import torch.jit
# Set the default device to use GPU if available
device = torch.device("cuda"... | EXA-1-master | exa/modular_components/activations/KNOTX/experimental_async.py |
#vectorized operations
#cache knot invariants
#faster approximation
#parallelize the lorenz system solver
#use a lower-order ODE solver
import math
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torc... | EXA-1-master | exa/modular_components/activations/KNOTX/v6.py |
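The comments opening the row above list the intended optimizations ("cache knot invariants", "vectorized operations"). A minimal sketch of the caching idea under stated assumptions: the `(m, n)` representation is the one from `knotODE.py`, and evaluating the torus-knot Jones polynomial at a fixed point `t=0.5` stands in for whatever invariant the truncated file actually computes:

```python
import functools
import numpy as np

def convert_to_knot_representation(x):
    # integer (m, n) torus-knot representation, as in knotODE.py above
    m = int(np.ceil(x))
    return (m, m + 1)

@functools.lru_cache(maxsize=None)
def cached_knot_invariant(m, n, t=0.5):
    # the (m, n) pair is hashable, so lru_cache applies directly;
    # the evaluation point t=0.5 is an arbitrary assumption
    numerator = t ** ((m - 1) * (n - 1) / 2) * (1 - t ** (m + 1) - t ** (n + 1) + t ** (m + n))
    return numerator / (1 - t ** 2)

print(cached_knot_invariant(*convert_to_knot_representation(2.3)))
```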
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
import asyncio
# Set the default device to use GPU if available
device = torch.device("cuda" if torch.cuda.is_... | EXA-1-master | exa/modular_components/activations/KNOTX/async.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import concurrent.futures
def lorenz_system(t, state, sigma, rho, beta):
x, y, z = state
dx_dt = sigma * (y - x)
dy_dt = x *... | EXA-1-master | exa/modular_components/activations/KNOTX/main.py |
import torch
import numpy as np
from scipy.integrate import solve_ivp
import torch.optim
import sympy as sp
import torch.nn as nn
import torch.optim as optim
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominato... | EXA-1-master | exa/modular_components/activations/KNOTX/v3.py |
import sympy as sp
def jones_polynomial_torus_knot(m, n):
t = sp.symbols('t')
numerator = t**((m-1) * (n-1)/2) * (1 - t ** (m + 1) - t**(n + 1) + t**(m+n))
denominator = 1 - t **2
return numerator / denominator
def knot_invariant(x):
#convert the input value into a knot representation (m, n ) fo... | EXA-1-master | exa/modular_components/activations/KNOTX/knot/jones.py |
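As a check on the formula above, a worked example: the (2, 3) torus knot is the trefoil. An exact rational exponent is substituted here, since the float division `/2` in the row above would hand SymPy a float power:

```python
import sympy as sp

def jones_polynomial_torus_knot(m, n):
    t = sp.symbols('t')
    numerator = t**sp.Rational((m - 1) * (n - 1), 2) * (1 - t**(m + 1) - t**(n + 1) + t**(m + n))
    return sp.simplify(numerator / (1 - t**2))

print(jones_polynomial_torus_knot(2, 3))  # -t**4 + t**3 + t, the right-handed trefoil
```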
import jax
import jax.numpy as jnp
from jax import jit
@jit
def lorenz(sigma, beta, rho, X, t):
    x, y, z = X
    xdot = sigma * (y - x)
    ydot = x * (rho - z) - y
    zdot = x * y - beta * z
    return jnp.array([xdot, ydot, zdot])
#since the params are fixed we use a partial to create a new function that does not take them
g = p... | EXA-1-master | exa/modular_components/activations/KNOTX/visualizations/lorenz.py |
import numpy as np
import torch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.integrate import solve_ivp
def convert_to_knot_representation(x):
m = int(np.ceil(x))
n = m + 1
return (m, n)
def knotx(x, device):
x_flat = x.view(-1)
x_flat = x_flat.to(device)
... | EXA-1-master | exa/modular_components/activations/KNOTX/visualizations/knot.py |
import torch.multiprocessing as mp
import time
import copy
class InfinityWarp:
def __init__(self, model, train_data, train_labels, infer_data, train_fn, infer_fn):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
... | EXA-1-master | exa/modular_components/infinityWarp/InfinityWarp.py |
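The constructor above takes user-supplied `train_fn` and `infer_fn`; a hypothetical usage pattern follows (the process layout and argument order are assumptions, since the class body is truncated):

```python
import torch.multiprocessing as mp

def run_concurrently(iw):
    # run training and inference as separate processes over the same model;
    # hypothetical: mirrors the constructor arguments visible above
    train_proc = mp.Process(target=iw.train_fn,
                            args=(iw.model, iw.train_data, iw.train_labels))
    infer_proc = mp.Process(target=iw.infer_fn, args=(iw.model, iw.infer_data))
    train_proc.start()
    infer_proc.start()
    train_proc.join()
    infer_proc.join()
```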
import torch.multiprocessing as mp
import time
import copy
class InfinityWarp:
def __init__(self, model, train_data, train_labels, infer_data):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
def train_model(self... | EXA-1-master | exa/modular_components/infinityWarp/experimental/InfinityWarp3.py |
import multiprocessing
import time
import copy
import torch
class ConcurrentTrainInference:
def __init__(self, model, train_data, train_labels, infer_data):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
def... | EXA-1-master | exa/modular_components/infinityWarp/experimental/InfinityWarp2.py |
import threading
import time
import copy
class ConcurrentTrainInference:
def __init__(self, model, train_data, train_labels, infer_data):
self.model = model
self.train_data = train_data
self.train_labels = train_labels
self.infer_data = infer_data
def train_model(self):
... | EXA-1-master | exa/modular_components/infinityWarp/experimental/infinitywarp.py |
# import torch.multiprocessing as mp
# import time
# import copy
# import
# # Modify the InfinityWarp class
# class InfinityWarp:
# def __init__(self, accelerator, model, train_data, train_labels, infer_data, train_fn, infer_fn):
# self.accelerator = accelerator
# self.model = model
# sel... | EXA-1-master | exa/modular_components/infinityWarp/distributed/InfinityWarp.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens,... | EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI.py |
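This `VisionLanguageEmbedding` recurs in several OMNI variants below, each truncated at the same `if textual_tok...` branch. A minimal sketch of the dispatch that branch appears to implement; the concatenation order when both modalities are present is an assumption:

```python
import torch
import torch.nn as nn

class VisionLanguageEmbedding(nn.Module):
    def __init__(self, text_embed, vision_embed):
        super().__init__()
        self.text_embed = text_embed
        self.vision_embed = vision_embed

    def forward(self, textual_tokens=None, visual_tokens=None, **kwargs):
        # route each modality to its own embedder; concatenate when both given
        if textual_tokens is None:
            return self.vision_embed(visual_tokens)
        if visual_tokens is None:
            return self.text_embed(textual_tokens)
        x1 = self.vision_embed(visual_tokens)
        x2 = self.text_embed(textual_tokens)
        return torch.cat([x1, x2], dim=1)
```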
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tok... | EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI5.py |
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_tok... | EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI4.py |
import torch
import torch.nn as nn
class OmniMorph(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self._embedding_registry = {}
self._embedding_instances = {}
def register_embedding(self, modality_type, embedding_class):
self._embedding_registry[modality_... | EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI3.py |
import torch
import torch.nn as nn
class VisionLanguageEmbedding(nn.Module):
def __init__(self, text_embed, vision_embed):
super().__init__()
self.text_embed = text_embed
self.vision_embed = vision_embed
def forward(self, textual_tokens, visual_tokens, **kwargs):
if textual_... | EXA-1-master | exa/modular_components/embedding/OmniMorph/OMNI2.py |
from setuptools import setup, find_packages
setup(
name = 'lion-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.7',
license='MIT',
description = 'Lion Optimizer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url... | EXA-1-master | exa/modular_components/optimizers/lion-pytorch/setup.py |
import torch
try:
import triton
import triton.language as tl
except ImportError as e:
print('triton is not installed, please install by running `pip install triton -U --pre`')
exit()
@triton.autotune(configs = [
triton.Config({'BLOCK_SIZE': 128}, num_warps = 4),
triton.Config({'BLOCK_SIZE': 1... | EXA-1-master | exa/modular_components/optimizers/lion-pytorch/lion_pytorch/triton.py |
from typing import Tuple, Optional, Callable
import torch
from torch.optim.optimizer import Optimizer
# functions
def exists(val):
return val is not None
# update functions
def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
# stepweight decay
p.data.mul_(1 - lr * wd)
# weight update
upda... | EXA-1-master | exa/modular_components/optimizers/lion-pytorch/lion_pytorch/lion_pytorch.py |
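The `upda...` line above is truncated. A sketch of the Lion update rule as described in the Lion paper (the sign of an interpolation between momentum and gradient), which is what this function appears to implement; the exact in-place tensor ops are assumptions:

```python
import torch

def update_fn(p, grad, exp_avg, lr, wd, beta1, beta2):
    # stepweight (decoupled) weight decay, as in the visible lines
    p.data.mul_(1 - lr * wd)
    # parameter step: sign of a beta1-interpolation of momentum and gradient
    update = exp_avg.clone().mul_(beta1).add_(grad, alpha=1 - beta1).sign_()
    p.add_(update, alpha=-lr)
    # momentum is updated with the second coefficient
    exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)
```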
from lion_pytorch.lion_pytorch import Lion
| EXA-1-master | exa/modular_components/optimizers/lion-pytorch/lion_pytorch/__init__.py |
from setuptools import setup, find_packages
setup(
name = 'CoLT5-attention',
packages = find_packages(),
version = '0.3.4',
license='MIT',
description = 'Conditionally Routed Attention',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url ... | EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/setup.py |
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(va... | EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/transformer_block.py |
from colt5_attention.transformer_block import (
ConditionalRoutedFeedForward,
ConditionalRoutedAttention,
ConditionalRoutedAutoregressiveAttention,
ConditionalRoutedCrossAttention,
ConditionalRoutedTransformerBlock,
DifferentiableTopKRouter,
SinkhornRouter,
CoordinateDescentRouter
)
fro... | EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/__init__.py |
import torch
import torch.nn.functional as F
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def coor_descent(
s,
*,
n_iters,
k,
eps = 1e-1,
clamp_fn = F.relu,
mask = None,
):
mask_value = -torch.finfo(s.dtype).max
cons... | EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/coor_descent.py |
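A quick usage sketch of `coor_descent` based on the signature visible above; the import path follows this row's file path, the reading of the output as a relaxed top-k weighting is how CoLT5 describes the router, and `n_iters=20` is an arbitrary choice:

```python
import torch
from colt5_attention.coor_descent import coor_descent

scores = torch.randn(2, 8)           # routing scores for 8 tokens
routed = coor_descent(scores, n_iters=20, k=2)
print(routed.shape)                  # (2, 8): soft weights concentrating on ~2 tokens
```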
import math
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn, einsum
from local_attention import LocalMHA
from einops import rearrange, repeat, pack, unpack
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(va... | EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/flash.py |
import torch
from einops import repeat
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def scatter_mean(src, t, index, dim, eps = 1e-5):
index = repeat(index, '... -> ... d', d = t.shape[-1])
numer = src.scatter_add(dim, index, t)
denom = src.scatter_add(dim, index, torch.ones_like(t))
... | EXA-1-master | exa/modular_components/attentions/CoLT5-attention-main/colt5_attention/sinkhorn.py |
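`scatter_mean` above is cut off after the two `scatter_add` calls; a self-contained sketch with the likely final step filled in (the `clamp(min=eps)` division is an assumption implied by the `eps` parameter):

```python
import torch
from einops import repeat

def scatter_mean(src, t, index, dim, eps=1e-5):
    # bucket-average t into src along dim according to index
    index = repeat(index, '... -> ... d', d=t.shape[-1])
    numer = src.scatter_add(dim, index, t)
    denom = src.scatter_add(dim, index, torch.ones_like(t))
    return numer / denom.clamp(min=eps)
```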
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
# flash attention
def _query_chunk_flash_attention(q_range_chunk, k_range, q, k, v):
q... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/casual_flash_attention.py |
import jax
from jax import nn
from jax import jit, numpy as jnp
from jax.numpy import einsum
from einops import rearrange
EPSILON = 1e-10
MASK_VALUE = -1e10
COSINE_SIM_SCALE = 10
@jit
def attention(q, k, v, key_mask):
dim, k_len = q.shape[-1], k.shape[-2]
scale = 1 / jnp.sqrt(dim)
q = q * scale
si... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/attention.py |
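The similarity computation above is truncated at `si...`. A self-contained sketch of plain scaled-dot-product attention in the same JAX style; the einsum axis layout and the way `key_mask` is broadcast are assumptions:

```python
import jax.numpy as jnp
from jax import nn, jit

MASK_VALUE = -1e10

@jit
def attention(q, k, v, key_mask):
    scale = 1 / jnp.sqrt(q.shape[-1])
    sim = jnp.einsum('... i d, ... j d -> ... i j', q * scale, k)
    sim = jnp.where(key_mask[..., None, :], sim, MASK_VALUE)  # mask out padded keys
    attn = nn.softmax(sim, axis=-1)
    return jnp.einsum('... i j, ... j d -> ... i d', attn, v)
```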
import math
from functools import partial
import jax
from jax import lax, numpy as jnp, jit
# constants
HIGHEST_PRECISION = jax.lax.Precision.HIGHEST
einsum = partial(jnp.einsum, precision = HIGHEST_PRECISION)
# Figure 1 from https://arxiv.org/abs/2112.05682
# cleaned up
def _query_chunk_attention(q, k, v, k_... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/rabe_attention.py |
from flash_attention import flash_attention
from cosine_sim_flash_attention import cosine_sim_flash_attention
from casual_flash_attention import causal_flash_attention
from rabe_attention import rabe_attention
from attention import attention, causal_attention, cosine_sim_attention
from utils import value_and_grad_diff... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/__init__.py |
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
#CONSTANTS
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
COSINE_SIM_SCALE = 10 # this may need to be a function of log(sequence length) but 15 was s... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/cosine_sim_flash_attention.py |
import jax
from functools import partial
import jax.numpy as jnp
from jax import random
from jax import value_and_grad
def value_and_grad_wrapper(fn, **kwargs):
@partial(value_and_grad, **kwargs)
def inner(*args, **kwargs):
return jnp.sum(fn(*args, **kwargs))
return inner
def diff(t1, t2):
... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/utils.py |
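The wrapper above sums a function's output so `value_and_grad` can differentiate it; a small self-contained usage check:

```python
from functools import partial
import jax.numpy as jnp
from jax import value_and_grad

def value_and_grad_wrapper(fn, **kwargs):
    @partial(value_and_grad, **kwargs)
    def inner(*args, **kw):
        return jnp.sum(fn(*args, **kw))
    return inner

val, grads = value_and_grad_wrapper(lambda x: x ** 2)(jnp.arange(4.0))
print(val, grads)  # 14.0 and [0. 2. 4. 6.]
```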
import math
import jax
from functools import partial
from jax import nn
from jax import custom_vjp
from jax import numpy as jnp, lax, jit
from jax.numpy import einsum
from einops import rearrange
# constants
EPSILON = 1e-10
MASK_VALUE = -1e10
Q_CHUNK_SIZE = 1024
K_CHUNK_SIZE = 1024
# flash attention
def _quer... | EXA-1-master | exa/modular_components/attentions/flashAttentionJAX/flash_attention.py |
import torch
import torch.nn as nn
from torch.nn import LayerNorm
from einops import rearrange
class KernelAttention(nn.Module):
def __init__(self, embed_dim, num_heads, dropout=0.0, kernel="gaussian", sigma=1.0):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
... | EXA-1-master | exa/modular_components/attentions/KernelizedAttention/ka.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from pathlib import Path
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, C... | EXA-1-master | exa/modular_components/attentions/flash-attention/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
from setuptools import setup, find_packages
import subprocess
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
... | EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/ft_attention/setup.py |
# Copied from https://github.com/NVIDIA/apex/tree/master/csrc/megatron
# We add the case where seqlen = 4k and seqlen = 8k
import os
import subprocess
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
... | EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/fused_softmax/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
#... | EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/xentropy/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
#... | EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/layer_norm/setup.py |
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import sys
import warnings
import os
from packaging.version import parse, Version
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
#... | EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/rotary/setup.py |
import os
import subprocess
from packaging.version import parse, Version
import torch
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], univers... | EXA-1-master | exa/modular_components/attentions/flash-attention/csrc/fused_dense_lib/setup.py |
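The `nvcc -V` parsing above is truncated mid-call. A sketch of the complete helper as it appears in the apex `setup.py` these files say they adapt (the exact string splitting is an assumption from that upstream source):

```python
import subprocess
from packaging.version import parse

def get_cuda_bare_metal_version(cuda_dir):
    # ask nvcc for its version banner and pull out the "release X.Y" token
    raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"],
                                         universal_newlines=True)
    output = raw_output.split()
    release_idx = output.index("release") + 1
    bare_metal_version = parse(output[release_idx].split(",")[0])
    return raw_output, bare_metal_version
```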
from typing import Callable
import dotenv
import hydra
from omegaconf import OmegaConf, DictConfig
# load environment variables from `.env` file if it exists
# recursively searches for `.env` in all folders starting from work dir
dotenv.load_dotenv(override=True)
OmegaConf.register_new_resolver('eval', eval)
OmegaCo... | EXA-1-master | exa/modular_components/attentions/flash-attention/training/run.py |