| python_code | repo_name | file_path |
|---|---|---|
"""Custom video dataloader for ImageNet-P dataset, which comes in .mp4."""
import cv2
# from skvideo.io import VideoCapture
# import skvideo.io
import torch
import torch.utils.data as data
from torchvision.datasets.folder import DatasetFolder
from PIL import Image
import os
import os.path
import sys
class VideoFol... | state-spaces-main | src/dataloaders/utils/video_loader.py |
"""Implementation of CIFAR augmentations. Not currently used.
Borrowed from https://github.com/hysts/pytorch_image_classification/tree/9ff4248905850c68aa9c09c17914307eb81769e7/pytorch_image_classification/transforms
"""
import torch
import numpy as np
import PIL
import PIL.Image
from PIL.Image import Image
class Np... | state-spaces-main | src/dataloaders/utils/cifar_augmentations.py |
"""Implementation of Mixup from timm."""
import torch
from timm.data import Mixup
from timm.data.mixup import mixup_target
class TimmMixup(Mixup):
"""Wrap timm.data.Mixup that avoids the assert that batch size must be even."""
def __call__(self, x, target, *args):
if self.mode == 'elem':
... | state-spaces-main | src/dataloaders/utils/timm_mixup.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | state-spaces-main | src/dataloaders/utils/vocabulary.py |
"""Utilities for special optimizer hyperparameters.
`group_parameters_for_optimizer` is a modification of timm's optimizer logic; it is currently unused.
`add_optimizer_hooks` is an improved version that uses this codebase's _optim dictionary.
"""
import inspect
import torch.nn as nn
import hydra
def add_optimiz... | state-spaces-main | src/utils/optim_groups.py |
"""Utilities for dealing with collection objects (lists, dicts) and configs."""
from typing import Sequence, Mapping, Optional, Callable
import functools
import hydra
from omegaconf import ListConfig, DictConfig
# TODO this is usually used in a pattern where it's turned into a list, so can just do that here
def is_li... | state-spaces-main | src/utils/config.py |
optimizer = {
"adam": "torch.optim.Adam",
"adamw": "torch.optim.AdamW",
"rmsprop": "torch.optim.RMSprop",
"sgd": "torch.optim.SGD",
"lamb": "src.utils.optim.lamb.JITLamb",
}
scheduler = {
"constant": "transformers.get_constant_schedule",
"plateau": "torch.optim.lr... | state-spaces-main | src/utils/registry.py |
from .config import is_list, is_dict, to_list, to_dict, get_class, instantiate
| state-spaces-main | src/utils/__init__.py |
import math
import numpy as np
import torch
### Bit reversal permutation
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_p... | state-spaces-main | src/utils/permutations.py |
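The interleaving loop in `bitreversal_po2` builds the standard FFT bit-reversal ordering. A quick cross-check against reversing the bit strings directly (hypothetical helper, assuming n is a power of two):

```python
import numpy as np

def bitreversal_reference(n):
    m = n.bit_length() - 1  # log2(n) for a power of two
    return np.array([int(format(i, f"0{m}b")[::-1], 2) for i in range(n)])

# Both this and bitreversal_po2(8) should yield [0, 4, 2, 6, 1, 5, 3, 7].
```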
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | state-spaces-main | src/utils/distributed.py |
"""Utils for the training loop. Adapted from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py."""
import logging
import os
import warnings
from typing import List, Sequence
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pyt... | state-spaces-main | src/utils/train.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | state-spaces-main | src/utils/optim/lamb.py |
"""Wrapper of optimizers in torch.optim for computation of exponential moving average of parameters.
Source: https://github.com/kamenbliznashki/pixel_models/blob/master/optim.py
"""
import torch
def build_ema_optimizer(optimizer_cls):
class Optimizer(optimizer_cls):
def __init__(self, *args, polyak=0.0, ... | state-spaces-main | src/utils/optim/ema.py |
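The idea of `build_ema_optimizer` is to subclass an arbitrary torch.optim optimizer so that each `step()` also updates a Polyak (exponential moving) average of the parameters. A minimal sketch of that pattern, with hypothetical attribute names rather than the repo's exact internals:

```python
import torch

def build_ema_sketch(optimizer_cls):
    class EMAOptimizer(optimizer_cls):
        def __init__(self, *args, polyak=0.0, **kwargs):
            super().__init__(*args, **kwargs)
            self.polyak = polyak
            # One shadow tensor per parameter, initialized to the current value.
            self.ema = {p: p.detach().clone()
                        for g in self.param_groups for p in g["params"]}

        @torch.no_grad()
        def step(self, closure=None):
            loss = super().step(closure)
            for p, shadow in self.ema.items():
                shadow.mul_(self.polyak).add_(p, alpha=1 - self.polyak)
            return loss
    return EMAOptimizer

# Usage sketch: AdamEMA = build_ema_sketch(torch.optim.Adam)
```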
"""Custom learning rate schedulers."""
import math
import warnings
import torch
from timm.scheduler import CosineLRScheduler
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, w... | state-spaces-main | src/utils/optim/schedulers.py |
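The truncated `CosineWarmup` signature suggests a warmup phase prepended to cosine annealing. A generic warmup-then-cosine schedule written independently with LambdaLR (parameter names are hypothetical, not the repo's):

```python
import math
import torch

def warmup_cosine(warmup_steps, total_steps):
    def fn(step):
        if step < warmup_steps:
            return step / max(1, warmup_steps)  # linear warmup from 0 to 1
        t = (step - warmup_steps) / max(1, total_steps - warmup_steps)
        return 0.5 * (1 + math.cos(math.pi * t))  # cosine decay to 0
    return fn

# scheduler = torch.optim.lr_scheduler.LambdaLR(opt, warmup_cosine(1_000, 100_000))
```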
"""Implementations of different types of residual functions."""
import torch
from torch import nn
class Residual(nn.Module):
"""Residual connection with constant affine weights.
Can simulate standard residual, no residual, and "constant gates".
"""
def __init__(self, i_layer, d_input, d_model, alpha... | state-spaces-main | src/models/nn/residual.py |
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by... | state-spaces-main | src/models/nn/adaptive_softmax.py |
"""Linear nn components."""
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from src.models.nn.activation import Activation
contract = torch.einsum
def get_initializer(name, activation=None):
if activation in [ None, 'id',... | state-spaces-main | src/models/nn/linear.py |
from .linear import LinearActivation, TransposedLinear
from .activation import Activation
from .normalization import Normalization
from .dropout import DropoutNd, StochasticDepth
| state-spaces-main | src/models/nn/__init__.py |
"""Original from Transformer-XL as a hook for their initialization. Currently not used."""
import torch
from torch import nn
def init_weight(weight, init_cfg):
if init_cfg.init == 'uniform':
nn.init.uniform_(weight, -init_cfg.init_range, init_cfg.init_range)
elif init_cfg.init == 'normal':
nn.... | state-spaces-main | src/models/nn/initialization.py |
"""Utilities for activation functions."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
def Activation(activation=None, size=None, dim=-1):
if activation in [ None, 'id', 'identity', 'linear', 'none' ]:
return nn.Identity()
elif activation == 'tanh':
return nn.... | state-spaces-main | src/models/nn/activation.py |
"""Utility wrappers around modules to let them handle extra arguments."""
import inspect
from functools import wraps
import torch
from torch import nn
def wrap_kwargs(f):
"""Wrap a Callable to pass through extra arguments.
Given a callable f that can consume some named arguments,
wrap it with a kwargs th... | state-spaces-main | src/models/nn/utils.py |
"""Defines flexible gating mechanisms.
Based on ideas from LSSL paper and UR-LSTM paper (https://arxiv.org/abs/1910.09890).
"""
import torch
import torch.nn as nn
class Gate(nn.Module):
"""Implements gating mechanisms.
The LSSL paper elaborates on the most important connection: A standard sigmoid gate
is equiv... | state-spaces-main | src/models/nn/gate.py |
"""Wrapper around expRNN's Orthogonal class for convenience."""
from .exprnn.orthogonal import Orthogonal
from .exprnn.trivializations import expm, cayley_map
from .exprnn.initialization import henaff_init_, cayley_init_
param_name_to_param = {'cayley': cayley_map, 'expm': expm}
init_name_to_init = {'henaff': henaff_... | state-spaces-main | src/models/nn/orthogonal.py |
"""Utility nn components, in particular handling activations, initializations, and normalization layers."""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
def stochastic_depth(input: torch.Tensor, p: float, mode: str, training: bool = Tru... | state-spaces-main | src/models/nn/dropout.py |
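Stochastic depth drops whole residual branches (or whole samples) rather than individual units. A per-sample ("row" mode) sketch of the idea, independent of the repo's exact implementation:

```python
import torch

def stochastic_depth_row(x, p, training=True):
    # Zero out entire samples with probability p, rescale survivors by
    # 1/(1-p) so the expected value is unchanged at train time.
    if not training or p == 0.0:
        return x
    keep_shape = [x.shape[0]] + [1] * (x.dim() - 1)
    keep = (torch.rand(keep_shape, device=x.device) >= p).to(x.dtype)
    return x * keep / (1.0 - p)
```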
"""Implementations of several types of Discrete Sin/Cosine Transforms with various reductions to FFT.
Currently not used by S4.
"""
import torch
import torch.nn as nn
import numpy as np
import scipy.fft
from einops import rearrange, repeat
class DCT(nn.Module):
"""Reductions adapted from https://dsp.stackexchang... | state-spaces-main | src/models/nn/dxt.py |
"""Normalization modules."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
class Normalization(nn.Module):
def __init__(
self,
d,
transposed=False, # Length dimension is -1 or -2
_name_='layer',
**kwargs
):
super... | state-spaces-main | src/models/nn/normalization.py |
# Downloaded from https://github.com/Lezcano/expRNN
#
# Authors: Travis Oliphant, March 2002
# Anthony Scopatz, August 2012 (Sparse Updates)
# Jake Vanderplas, August 2012 (Sparse Updates)
#
"""Adaptation of expm and expm_frechet in numpy for torch."""
from __future__ import division, print_function... | state-spaces-main | src/models/nn/exprnn/expm32.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
import numpy as np
import scipy.linalg as la
def henaff_init_(A):
size = A.size(0) // 2
diag = A.new(size).uniform_(-np.pi, np.pi)
return create_diag_(A, diag)
def cayley_init_(A):
size = A.size(0) // 2
diag = A.new(size).uniform_... | state-spaces-main | src/models/nn/exprnn/initialization.py |
# Adapted from https://github.com/Lezcano/expRNN
import torch
import torch.nn as nn
from .parametrization import Parametrization
from src.models.nn.activation import ModReLU
class Orthogonal(Parametrization):
"""Class that implements optimization restricted to the Stiefel manifold."""
def __init__(self, d_... | state-spaces-main | src/models/nn/exprnn/orthogonal.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
from .expm32 import expm32, differential
def cayley_map(X):
n = X.size(0)
Id = torch.eye(n, dtype=X.dtype, device=X.device)
return torch.linalg.solve(Id + X, Id - X)  # (Id + X)^{-1}(Id - X); torch.solve was removed from recent PyTorch
class expm_class(torch.autograd.Function):
@staticmethod
def forward(... | state-spaces-main | src/models/nn/exprnn/trivializations.py |
# Downloaded from https://github.com/Lezcano/expRNN
import torch
import torch.nn as nn
def get_parameters(model):
parametrized_params = []
def get_parametrized_params(mod):
nonlocal parametrized_params
if isinstance(mod, Parametrization):
parametrized_params.append(mod.A)
de... | state-spaces-main | src/models/nn/exprnn/parametrization.py |
"""Utilities to calculate the transitions of the HiPPO ODE x' = Ax + Bu and discrete-time recurrence approximation.
Note that these modules were heavily used in LSSL, but are no longer needed for S4.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import special as ... | state-spaces-main | src/models/hippo/transition.py |
"""Standalone implementation of HiPPO operators.
Contains the function reconstruction experiments from the original HiPPO paper,
as well as new animations from "How to Train Your HiPPO".
This file ports the notebook notebooks/hippo_function_approximation.ipynb,
which is recommended if Jupyter is supported.
"... | state-spaces-main | src/models/hippo/visualizations.py |
"""Definitions of A and B matrices for various HiPPO operators."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import special as ss
from einops import rearrange, repeat
contract = torch.einsum
def embed_c2r(A):
A = rearrange(A, '... m n -> ... m () n ()')
... | state-spaces-main | src/models/hippo/hippo.py |
from .base import SequenceModule, TransposedModule
| state-spaces-main | src/models/sequence/__init__.py |
"""Defines base class SequenceModule, a modular interface for sequence models."""
from torch import nn
import functools
class SequenceModule(nn.Module):
"""Abstract sequence model class. All models must adhere to this interface.
A SequenceModule is generally a model that transforms an input of shape
(n_ba... | state-spaces-main | src/models/sequence/base.py |
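The contract is that every layer maps (batch, length, d_input) to (batch, length, d_output) and threads an optional recurrent state through. A hypothetical no-op module satisfying that interface:

```python
from torch import nn

class IdentitySequenceModule(nn.Module):
    """Hypothetical minimal example of the (batch, length, d) -> (batch, length, d)
    contract; real models subclass SequenceModule and set d_model/d_output."""
    def __init__(self, d_model):
        super().__init__()
        self.d_model = d_model
        self.d_output = d_model

    def forward(self, x, state=None):
        return x, state
```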
"""Wrapper around nn.Conv1d to adhere to SequenceModule interface."""
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule
from einops import rearrange
import src.models.nn.utils as U
from src.models.nn import Activation
class Conv1d(SequenceM... | state-spaces-main | src/models/sequence/convs/conv1d.py |
"""Wrapper around nn.Conv2d to adhere to SequenceModule interface."""
import torch
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import Activation, DropoutNd
class Conv2d(SequenceModule):
""" Simple wrapper for nn.Conv1d """
def __init__(self, d_model, d_output=N... | state-spaces-main | src/models/sequence/convs/conv2d.py |
"""Module for FFT convolution that accepts a flexible kernel parameterization."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from src.models.sequence import SequenceModule
from src.models.sequence.kernels import registry as kernel_registry
from src.models.n... | state-spaces-main | src/models/sequence/kernels/fftconv.py |
"""Construct wide convolution kernels."""
from typing import Optional, Mapping, Tuple, Union
from collections import defaultdict
import math
import torch
import torch.nn as nn
import src.utils.train
log = src.utils.train.get_logger(__name__)
class Kernel(nn.Module):
"""Interface for modules that produce convolu... | state-spaces-main | src/models/sequence/kernels/kernel.py |
from .kernel import ConvKernel, EMAKernel
from .ssm import SSMKernelDense, SSMKernelReal, SSMKernelDiag, SSMKernelDPLR
registry = {
'conv': ConvKernel,
'ema': EMAKernel,
'dense': SSMKernelDense,
'slow': SSMKernelDense,
'real': SSMKernelReal,
's4d': SSMKernelDiag,
'diag': SSMKernelDiag,
... | state-spaces-main | src/models/sequence/kernels/__init__.py |
"""SSM convolution kernels.
SSMKernelDPLR is the S4 kernel, implementing the 'diagonal plus low-rank' algorithm from the original S4 paper. This stores parameters A, B, C, dt, and calling it creates the SSM convolution kernel \bar{K}.
SSMKernelDense is a much simpler version included for illustration purposes. It has ... | state-spaces-main | src/models/sequence/kernels/ssm.py |
"""Initializations of structured state space (S4) models with diagonal plus low rank (DPLR) parameterization."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
import src.models.hippo.hippo as hippo
import src.utils.train
log = src.utils.train.get_lo... | state-spaces-main | src/models/sequence/kernels/dplr.py |
# Adapted from https://github.com/HazyResearch/zoo
# in turn adapted from https://github.com/idiap/fast-transformers/blob/master/fast_transformers/feature_maps/fourier_features.py
"""Implementation of Performer model (https://arxiv.org/abs/2009.14794)."""
import math
import torch
from einops import rearrange, repeat
... | state-spaces-main | src/models/sequence/attention/performer.py |
"""Implement linear attention.
From github.com/HazyResearch/transformers
"""
from functools import partial
from contextlib import contextmanager
import torch
import torch.nn as nn
import hydra
from einops import rearrange
from fast_transformers.feature_maps import elu_feature_map
from fast_transformers.masking import... | state-spaces-main | src/models/sequence/attention/linear.py |
"""Wrapper around nn.MultiheadAttention to adhere to SequenceModule interface."""
import torch
import torch.nn.functional as F
from torch import nn
import hydra
from src.models.sequence.base import SequenceModule, TransposedModule
import src.models.nn.utils as U
from einops import rearrange
@TransposedModule
class Multih... | state-spaces-main | src/models/sequence/attention/mha.py |
# Expose the cell registry and load all possible cells
from .cells.basic import CellBase
from .cells import basic
from .cells import hippo
from .cells import timestamp
from . import sru
| state-spaces-main | src/models/sequence/rnns/__init__.py |
"""Implements variant of HiPPO-RNN that doesn't feed the hidden and memory states into each other time-wise, instead using simpler linear recurrences in time and letting them interact depthwise.
[21-10-22] AG: This was old experimental code. It should still work (perhaps with some minimal modifications), but there is ... | state-spaces-main | src/models/sequence/rnns/qrnn.py |
import torch
import torch.nn as nn
import src.utils as utils
from src.models.sequence.rnns.cells import CellBase
from src.models.sequence import SequenceModule
# [21-09-12 AG]: We previously set up a way to register RNNCell classes, which gives them a "local" name
# To convert this mapping from name to constructor, w... | state-spaces-main | src/models/sequence/rnns/rnn.py |
"""Implementation of the Simple Recurrent Unit.
https://arxiv.org/abs/1709.02755
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from src.models.sequence.rnns.cells import CellBase
from src.models.nn import LinearActivation
import src.models.nn.utils as U
from src.... | state-spaces-main | src/models/sequence/rnns/sru.py |
"""The core RNN cell architecture of the HiPPO-RNN from the original HiPPO paper."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from scipy import signal
from scipy import linalg as la
from src.models.sequence.rnns.cells.basic import RNNCell
from src.models.nn imp... | state-spaces-main | src/models/sequence/rnns/cells/memory.py |
"""Implementation of the 'MinimalRNN', which a reviewer from NeurIPS2020 asked us to compare against.
https://arxiv.org/abs/1711.06788
[21-10-22] I believe this has not been tested in a while but should work with minimal modifications
"""
from src.models.sequence.rnns.cells.basic import CellBase
from src.models.nn im... | state-spaces-main | src/models/sequence/rnns/cells/minimalrnn.py |
from .basic import CellBase
| state-spaces-main | src/models/sequence/rnns/cells/__init__.py |
"""Baseline simple RNN cells such as the vanilla RNN and GRU."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.nn import LinearActivation, Activation # , get_initializer
from src.models.nn.gate import Gate
from src.models.nn.orthogonal import OrthogonalLinear
from src.models.seque... | state-spaces-main | src/models/sequence/rnns/cells/basic.py |
"""Implementation of full HiPPO-RNN variants."""
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
from src.models.sequence.rnns.cells.memory import LTICell, LSICell
from src.models.hippo.hippo import transition
class HiPPOLTICell(LTICell):
measure = None
def __init... | state-spaces-main | src/models/sequence/rnns/cells/hippo.py |
"""Variants of the HiPPO-RNN that accept timestamped inputs and evolve according to the elapsed time between inputs. Used in original HiPPO paper for irregularly-sampled CharacterTrajectories experiments."""
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from src.mode... | state-spaces-main | src/models/sequence/rnns/cells/timestamp.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.sequence.base import SequenceModule
from src.models.sequence.modules.pool import DownPool, UpPool
from src.models.sequence.backbones.block import SequenceResidualBlock
class Sashimi(SequenceModule):
def __init__(
... | state-spaces-main | src/models/sequence/backbones/sashimi.py |
"""Different deep backbone that is essentially a 1-D UNet instead of ResNet/Transformer backbone.
Sequence length gets downsampled through the depth of the network while the number of features increases.
Then sequence length gets upsampled again (causally) and blocks are connected through skip connections.
"""
import math... | state-spaces-main | src/models/sequence/backbones/unet.py |
"""Isotropic deep sequence model backbone, in the style of ResNets / Transformers.
The SequenceModel class implements a generic (batch, length, d_input) -> (batch, length, d_output) transformation.
"""
from functools import partial
from typing import Mapping, Optional
import torch
import torch.nn as nn
from einops i... | state-spaces-main | src/models/sequence/backbones/model.py |
"""Implements a full residual block around a black box layer.
Configurable options include:
normalization position: prenorm or postnorm
normalization type: batchnorm, layernorm etc.
subsampling/pooling
residual options: feedforward, residual, affine scalars, depth-dependent scaling, etc.
"""
from functools import par... | state-spaces-main | src/models/sequence/backbones/block.py |
"""Implementation of S4ND module (https://arxiv.org/abs/2210.06583)."""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.sequence.kernels import registry as kerne... | state-spaces-main | src/models/sequence/modules/s4nd.py |
"""Implementation of modular block design used in S4. Compatible with other kernels."""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as U
from functools import partial
from einops import rearrange, repeat
from src.models.nn import LinearActivation, Activation, DropoutNd
fro... | state-spaces-main | src/models/sequence/modules/s4block.py |
"""Implementation of FFN block in the style of Transformers."""
from functools import partial
from torch import nn
from src.models.sequence.base import SequenceModule
from src.models.nn import LinearActivation, DropoutNd
class FFN(SequenceModule):
def __init__(
self,
d_input,
e... | state-spaces-main | src/models/sequence/modules/ffn.py |
# Adapted from https://github.com/facebookresearch/mega/blob/ea355255149d38ffe16bf2c176d47c3864e8b05a/fairseq/modules/moving_average_gated_attention.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root di... | state-spaces-main | src/models/sequence/modules/megablock.py |
"""Implementation of LSSL module. Succeeded by S4."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from omegaconf import DictConfig
from src.models.nn import Activation
from src.models.functional.krylov import krylov
from src.models.hippo import t... | state-spaces-main | src/models/sequence/modules/lssl.py |
"""Implements downsampling and upsampling on sequences."""
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce
from src.models.sequence import SequenceModule
from src.models.nn import LinearActivation
"""The following pooling modules all subscribe to the sa... | state-spaces-main | src/models/sequence/modules/pool.py |
"""PyTorch ResNet implementations.
This started as a copy of https://github.com/pytorch/vision 'resnet.py' (BSD-3-Clause) with
additional dropout and dynamic global avg/max pool.
ResNeXt, SE-ResNeXt, SENet, and MXNet Gluon stem/downsample variants, tiered stems added by Ross Wightman
Copyright 2019, Ross Wightman
"""
... | state-spaces-main | src/models/baselines/resnet_timm.py |
# Copyright 2021 The ODE-LSTM Authors. All Rights Reserved.
"""Adapted from ODE-LSTM https://github.com/mlech26l/ode-lstms/."""
import torch
import torch.nn as nn
from torchdyn.models import NeuralDE
import pytorch_lightning as pl
from torchmetrics.functional import accuracy
class ODELSTMCell(nn.Module):
def __i... | state-spaces-main | src/models/baselines/odelstm.py |
"""The original Vision Transformer (ViT) from timm.
Copyright 2020 Ross Wightman.
"""
import math
import logging
from functools import partial
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.helpers import build_model... | state-spaces-main | src/models/baselines/vit_all.py |
"""Adapted from https://github.com/vincentherrmann/pytorch-wavenet."""
import os
import os.path
import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
import numpy as np
from src.models.sequence.base import SequenceModule
def mu_law_ex... | state-spaces-main | src/models/baselines/wavenet.py |
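The truncated `mu_law_ex...` helper presumably performs mu-law companding, which WaveNet uses to quantize audio into mu+1 classes. A generic sketch of that transform (not the repo's code):

```python
import math
import torch

def mu_law_encode_sketch(x, mu=255):
    # Compress [-1, 1] audio logarithmically, then quantize to mu + 1 levels.
    x = torch.clamp(x, -1.0, 1.0)
    y = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / math.log1p(mu)
    return ((y + 1) / 2 * mu + 0.5).long()  # integers in [0, mu]
```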
"""Neural Rough Differential Equations."""
import torch
from torch import nn
from torchdiffeq import odeint, odeint_adjoint
import bisect
def rdeint(logsig, h0, func, method='rk4', adjoint=False, return_sequences=False):
"""Analogous to odeint but for RDEs.
Note that we do not have time intervals here. This ... | state-spaces-main | src/models/baselines/nrde.py |
"""Implementation of UnICORNN model.
Adapted from https://github.com/tk-rusch/unicornn/blob/main/health_care/network.py.
Original docstring:
This code implements a fast CUDA version of the stacked UnICORNN model.
We emphasise that this code builds up on the fast CUDA implementation of the IndRNN https://github.com/Su... | state-spaces-main | src/models/baselines/unicornn.py |
"""Reproduction of ViT. Currently not used in favor of timm ViT."""
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat
from einops.layers.torch import Rearrange
from src.models.sequence.base import SequenceModule
class Residual(nn.Module):
def __init__... | state-spaces-main | src/models/baselines/vit.py |
"""2D ResNet baselines from torchvision."""
import torch.nn as nn
import torchvision.models as models
from einops import rearrange
class TorchVisionResnet(nn.Module):
def __init__(
self,
variant="resnet18", # e.g. [ "resnet18" | "resnet34" | "resnet50" | "wide_resnet50_2" ]
):
super(... | state-spaces-main | src/models/baselines/resnet.py |
"""End-to-end classification Transformer adapted from PyTorch examples.
The isotropic model backbone should subsume this architecture. See config configs/model/transformer.yaml
"""
import copy
from typing import Optional, Any
from typing import Tuple
import torch
from torch import Tensor
from torch.nn import Module
... | state-spaces-main | src/models/baselines/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the MIT license
"""ConvNext TIMM version with S4ND integration.
Paper: `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf
Original code and weights from https://github.com/facebookresearch/Con... | state-spaces-main | src/models/baselines/convnext_timm.py |
"""Adapted from LipschitzRNN https://github.com/erichson/LipschitzRNN.
Original code left as comments
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from src.models.sequence.base import SequenceModule
from copy import deepcopy
from torch... | state-spaces-main | src/models/baselines/lipschitzrnn.py |
"""Implementation of SampleRNN model.
Paper: https://arxiv.org/abs/1612.07837
"""
import torch
import torch.nn.functional as F
from torch.nn import init
import math
import numpy as np
from src.models.baselines.lstm import TorchLSTM
from src.models.baselines.gru import TorchGRU
from src.models.sequence.base import Se... | state-spaces-main | src/models/baselines/samplernn.py |
"""Wrapper around nn.LSTM to make it compatible with our RNN interface."""
import torch
from torch import nn
from src.models.sequence import SequenceModule, TransposedModule
from einops import rearrange
import src.models.nn.utils as U
@TransposedModule
class TorchLSTM(nn.LSTM, SequenceModule):
""" Wrapper around ... | state-spaces-main | src/models/baselines/lstm.py |
"""Wrapper around nn.GRU to make it compatible with our RNN interface. Similar to lstm.TorchLSTM."""
import torch
from torch import nn
from src.models.sequence import SequenceModule, TransposedModule
from einops import rearrange
import src.models.nn.utils as U
@TransposedModule
class TorchGRU(nn.GRU, SequenceModule):... | state-spaces-main | src/models/baselines/gru.py |
"""Implementation of Continuous Kernel Convolution (CKConv).
Paper: https://arxiv.org/abs/2102.02611
Adapted directly from https://github.com/dwromero/ckconv.
"""
from typing import Tuple, Optional
import numpy as np
import torch
import torch.fft
import torch.fft
import torch.nn as nn
import torch.nn.functional as f... | state-spaces-main | src/models/baselines/ckconv.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional, Collection
##############################################################################################################################################
# utility functions
def listify(p=None, q=None):
#... | state-spaces-main | src/models/baselines/nonaka/basic_conv1d.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.models.baselines.nonaka.basic_conv1d import create_head1d, Flatten
from enum import Enum
import re
# delegates
import inspect
def delegates(to=None, keep=False):
"Decorator: replace `**kwargs` in signature with params from `to`"
d... | state-spaces-main | src/models/baselines/nonaka/xresnet.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from src.models.baselines.nonaka.basic_conv1d import AdaptiveConcatPool1d, create_head1d
########################################################################################################
# Inception time inspired by https://github.c... | state-spaces-main | src/models/baselines/nonaka/inception.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from src.models.baselines.nonaka.basic_conv1d import create_head1d, Flatten
###############################################################################################
# Standard resnet
def conv(in_planes, out_planes, stride=1, kernel_... | state-spaces-main | src/models/baselines/nonaka/resnet.py |
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import torch
from einops import rearrange, repeat
contract = torch.einsum
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except ImportError:
pass
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)... | state-spaces-main | src/models/functional/vandermonde.py |
"""Old utilities for parallel scan implementation of Linear RNNs."""
# TODO this file could use much cleanup
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from src.models.functional.toeplitz import triangular_toeplitz_multiply, triangular_toeplitz_multiply_padded
fr... | state-spaces-main | src/models/functional/unroll.py |
"""pykeops implementations of the core Cauchy kernel used in the S4 algorithm.
The interface of the Cauchy multiplication is:
Inputs:
v: (N)
z: (N)
w: (L)
Returns: y (L)
y_k = \sum_i v_i / (z_i - w_k)
"""
import torch
from einops import rearrange
try:
import pykeops
from pykeops.torch import LazyTe... | state-spaces-main | src/models/functional/cauchy.py |
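Following the docstring's convention above (v, z of shape (N); w of shape (L)), the reduction can be written naively in one broadcasted line; the pykeops path computes the same sum without materializing the (N, L) matrix. A reference sketch:

```python
import torch

def cauchy_naive(v, z, w):
    # y_k = sum_i v_i / (z_i - w_k); broadcasts to an (N, L) grid, sums over N.
    return (v.unsqueeze(-1) / (z.unsqueeze(-1) - w)).sum(dim=0)
```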
"""Compute a Krylov function efficiently.
Note that LSSL called this a Krylov function for lack of better terminology,
while S4 renames the Krylov function to a "state space kernel".
An existing term in the literature is "Markov parameters" of an SSM.
The interface for this function is:
Inputs:
A : (N, N)
B : (N,... | state-spaces-main | src/models/functional/krylov.py |
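The "state space kernel" described above is the sequence B, AB, A^2 B, ..., optionally contracted with C. An O(L) reference loop (the library version avoids this loop with squaring/FFT tricks; the argument order here is assumed from the docstring):

```python
import torch

def krylov_naive(L, A, B, C=None):
    x, cols = B, []
    for _ in range(L):
        cols.append(x if C is None else C @ x)
        x = A @ x  # advance one step: A^k B
    return torch.stack(cols, dim=-1)  # (N, L), or (L,) if contracted with C
```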
"""Utilities for computing convolutions.
There are 3 equivalent views:
1. causal convolution
2. multiplication of (lower) triangular Toeplitz matrices
3. polynomial multiplication (mod x^N)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
def construct_toeplitz(v, f=0.0):
"""Exp... | state-spaces-main | src/models/functional/toeplitz.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import csv
from pathlib import Path
import pandas as pd
XX_EN_LANGUAGES = {
"1": ["fr", "de", ... | covost-main | get_covost_splits.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import os.path as op
import urllib.request
from tqdm import tqdm
LANG_CODE_2_TO_3 = {
'fr'... | covost-main | get_tt_speech.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
### useage ###
# (run w/ gpu): python dempStream.py --tx_cuda 1 --rx_cuda... | AudioDec-main | demoStream.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
### useage ###
# (run w/ gpu): python demoFile.py --model libritts_v1 -i ... | AudioDec-main | demoFile.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os... | AudioDec-main | codecStatistic.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os... | AudioDec-main | codecTrain.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
import os... | AudioDec-main | codecTest.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Adversarial loss modules."""
import torch
import torch.nn.functional as F
class GeneratorAdversarialLoss(torch.nn.Module):
"""Generator adversarial loss module."""
def __i... | AudioDec-main | losses/adversarial_loss.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Reference (https://github.com/kan-bayashi/ParallelWaveGAN/)
"""STFT-b... | AudioDec-main | losses/stft_loss.py |
from .adversarial_loss import * # NOQA
from .feat_match_loss import * # NOQA
from .mel_loss import * # NOQA
from .stft_loss import * # NOQA
from .waveform_loss import * # NOQA | AudioDec-main | losses/__init__.py |