Columns (name, type, value-length range):
    python_code    string    0 to 992k
    repo_name      string    8 to 46
    file_path      string    5 to 162
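The records that follow repeat these three fields in column order: a python_code value (truncated in this dump, and empty for some files), then repo_name, then file_path. Below is a minimal sketch of iterating such records with the Hugging Face datasets library, assuming the table has been exported to a local Parquet file; the file name "code_files.parquet" is a placeholder, not part of the original data.

from datasets import load_dataset

# Load the exported table; "code_files.parquet" is a hypothetical local path.
ds = load_dataset("parquet", data_files="code_files.parquet", split="train")

for row in ds:
    code = row["python_code"]   # raw file text, may be empty or very large
    repo = row["repo_name"]     # e.g. "EXA-1-master"
    path = row["file_path"]     # repository-relative path to the .py file
    if code:
        print(f"{repo}/{path}: {len(code)} characters")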
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/__init__.py
import torch import torch.nn as nn from functools import partial from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test from torch.utils.checkpoint import checkpoint from transformers import T5Tokenizer, T5Encoder...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/modules.py
# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition # Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn # Some layers are re-designed for CLAP import os os.environ['NUMBA_CACHE_DIR'] = '/tmp/' import torch import torch.nn as nn import torch.nn.functional as F from torc...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/pann_model.py
''' Feature Fusion for Varible-Length Data Processing AFF/iAFF is referred and modified from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py According to the paper: Yimian Dai et al, Attentional Feature Fusion, IEEE Winter Conference on Applications of Computer Vision, WACV 2021 ''' imp...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/feature_fusion.py
import hashlib import os import urllib import warnings from tqdm import tqdm _RN50 = dict( openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-q...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/pretrained.py
__version__ = '0.2.1'
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/version.py
import numpy as np import torch.nn.functional as F from torch import nn from .model import MLPLayers class LinearProbe(nn.Module): def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None): """ Args: model: nn.Module mlp: bool, if True, then use the MLP layer as the l...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/linear_probe.py
from .factory import list_models, create_model, create_model_and_transforms, add_model_config from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics from .model import CLAP, CLAPTextCfg, CLAPVisionCfg, CLAPAudioCfp, convert_weights_to_fp16, trace_model from .openai import load_openai_model, ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/__init__.py
import json import logging import os import pathlib import re from copy import deepcopy from pathlib import Path import torch from .model import CLAP, convert_weights_to_fp16 from .openai import load_openai_model from .pretrained import get_pretrained_url, download_pretrained from .transform import image_transform _...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/factory.py
""" CLAP Model Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. Adapted to the Audio Task. """ from collections import OrderedDict from dataclasses import dataclass from email.mime import audio from typing import Tuple, Union, Callable, Optional import numpy as np...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/model.py
# Ke Chen # knutchen@ucsd.edu # HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION # Some layers designed on the model # below codes are based and referred from https://github.com/microsoft/Swin-Transformer # Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/htsat.py
""" CLIP tokenizer Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ import gzip import html import os from functools import lru_cache from typing import Union, List import ftfy import regex as re import torch @lru_cache() def default_bpe(): return os.path.join(o...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/tokenizer.py
from multiprocessing.sharedctypes import Value import torch import torch.distributed.nn from torch import distributed as dist, nn as nn from torch.nn import functional as F import numpy as np from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score try: import horovod.torch as hvd except...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/loss.py
""" OpenAI pretrained model functions Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ import os import warnings from typing import Union, List import torch from .model import build_model_from_openai_state_dict from .pretrained import get_pretrained_url, list_pretr...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/openai.py
import numpy as np import torch from torch import nn as nn from torchvision.ops.misc import FrozenBatchNorm2d import logging import h5py from tqdm import tqdm import random import json import os import pathlib # TODO: (yusong) this not a good place to store those information and does not scale. Need to be fixed later....
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/utils.py
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \ CenterCrop def _convert_to_rgb(image): return image.convert('RGB') def image_transform( image_size: int, is_train: bool, mean=(0.48145466, 0.4578275, 0.40821073), ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/transform.py
""" timm model adapter Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model. """ from collections import OrderedDict import torch.nn as nn try: import timm from timm.models.layers import Mlp, to_2tuple from timm.models.layers.attention_pool2d impor...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/timm_model.py
from transformers import BertTokenizer, BertModel tokenizer = BertTokenizer.from_pretrained('bert-base-uncased') model = BertModel.from_pretrained("bert-base-uncased") text = "Replace me by any text you'd like." def bert_embeddings(text): # text = "Replace me by any text you'd like." encoded_input = tokenizer(...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/bert.py
import numpy as np import torch import torch.nn.functional as F from torch import nn from transformers import AutoModel from .audio import get_audio_encoder class Projection(nn.Module): def __init__(self, d_in: int, d_out: int, p: float=0.5) -> None: super().__init__() self.linear1 = nn.Linear(d_in...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/clap.py
from . import clap from . import audio from . import utils
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/__init__.py
import argparse import yaml import sys def read_config_as_args(config_path,args=None,is_config_str=False): return_dict = {} if config_path is not None: if is_config_str: yml_config = yaml.load(config_path, Loader=yaml.FullLoader) else: with open(config_path, "r") as f: ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/utils.py
import torch import torch.nn as nn import torch.nn.functional as F from torchlibrosa.stft import Spectrogram, LogmelFilterBank def get_audio_encoder(name: str): if name == "Cnn14": return Cnn14 else: raise Exception('The audio encoder name {} is incorrect or not supported'.format(name)) class...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/audio.py
import random import torchaudio from torch._six import string_classes import collections import re import torch.nn.functional as F import numpy as np from transformers import AutoTokenizer from ldm.modules.encoders.CLAP.utils import read_config_as_args from ldm.modules.encoders.CLAP.clap import CLAP import math import ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/CLAPWrapper.py
# -*- coding: utf-8 -*- """ # -------------------------------------------- # Super-Resolution # -------------------------------------------- # # Kai Zhang (cskaizhang@gmail.com) # https://github.com/cszn # From 2019/03--2021/08 # -------------------------------------------- """ import numpy as np import cv2 import tor...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/bsrgan.py
from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/__init__.py
import os import math import random import numpy as np import torch import cv2 from torchvision.utils import make_grid from datetime import datetime #import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" ''' # ----------------------...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/utils_image.py
# -*- coding: utf-8 -*- import numpy as np import cv2 import torch from functools import partial import random from scipy import ndimage import scipy import scipy.stats as ss from scipy.interpolate import interp2d from scipy.linalg import orth import albumentations import ldm.modules.image_degradation.utils_image as ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/bsrgan_light.py
# adopted from # https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py # and # https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py # and # https://github.com/openai/gu...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/util.py
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/__init__.py
# pytorch_diffusion + derived encoder decoder import math import torch import torch.nn as nn import numpy as np from einops import rearrange from ldm.util import instantiate_from_config from ldm.modules.attention import LinearAttention def get_timestep_embedding(timesteps, embedding_dim): """ This matches th...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/model.py
from abc import abstractmethod from functools import partial import math from typing import Iterable import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from ldm.modules.diffusionmodules.util import ( checkpoint, conv_nd, linear, avg_pool_nd, zero_module, ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/custom_openaimodel.py
from abc import abstractmethod from functools import partial import math from typing import Iterable import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F from ldm.modules.diffusionmodules.util import ( checkpoint, conv_nd, linear, avg_pool_nd, zero_module, ...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/openaimodel.py
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/distributions/__init__.py
import torch import numpy as np class AbstractDistribution: def sample(self): raise NotImplementedError() def mode(self): raise NotImplementedError() class DiracDistribution(AbstractDistribution): def __init__(self, value): self.value = value def sample(self): retur...
EXA-1-master
exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/distributions/distributions.py
import sys import os import librosa import numpy as np import torch import audio_to_text.captioning.models import audio_to_text.captioning.models.encoder import audio_to_text.captioning.models.decoder import audio_to_text.captioning.utils.train_util as train_util def load_model(config, checkpoint): ckpt = torch.l...
EXA-1-master
exa/models/AudioGPT/audio_to_text/inference_waveform.py
EXA-1-master
exa/models/AudioGPT/audio_to_text/__init__.py
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/__init__.py
import math import torch class ExponentialDecayScheduler(torch.optim.lr_scheduler._LRScheduler): def __init__(self, optimizer, total_iters, final_lrs, warmup_iters=3000, last_epoch=-1, verbose=False): self.total_iters = total_iters self.final_lrs = final_lrs if not isinstance(self...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/lr_scheduler.py
import json from tqdm import tqdm import logging import pickle from collections import Counter import re import fire class Vocabulary(object): """Simple vocabulary wrapper.""" def __init__(self): self.word2idx = {} self.idx2word = {} self.idx = 0 def add_word(self, word): i...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/build_vocab_ltp.py
import json from tqdm import tqdm import logging import pickle from collections import Counter import re import fire class Vocabulary(object): """Simple vocabulary wrapper.""" def __init__(self): self.word2idx = {} self.idx2word = {} self.idx = 0 def add_word(self, word): i...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/build_vocab_spacy.py
import json from tqdm import tqdm import logging import pickle from collections import Counter import re import fire class Vocabulary(object): """Simple vocabulary wrapper.""" def __init__(self): self.word2idx = {} self.idx2word = {} self.idx = 0 def add_word(self, word): ...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/build_vocab.py
import argparse import torch def main(checkpoint): state_dict = torch.load(checkpoint, map_location="cpu") if "optimizer" in state_dict: del state_dict["optimizer"] if "lr_scheduler" in state_dict: del state_dict["lr_scheduler"] torch.save(state_dict, checkpoint) if __name__ == "__ma...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/__init__.py
# -*- coding: utf-8 -*- #!/usr/bin/env python3 import os import sys import logging from typing import Callable, Dict, Union import yaml import torch from torch.optim.swa_utils import AveragedModel as torch_average_model import numpy as np import pandas as pd from pprint import pformat def load_dict_from_csv(csv, cols...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/train_util.py
import json from tqdm import tqdm import re import fire def tokenize_caption(input_json: str, keep_punctuation: bool = False, host_address: str = None, character_level: bool = False, zh: bool = True, output_json: ...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/tokenize_caption.py
import json import random import argparse import numpy as np from tqdm import tqdm from h5py import File import sklearn.metrics random.seed(1) parser = argparse.ArgumentParser() parser.add_argument("train_feature", type=str) parser.add_argument("train_corpus", type=str) parser.add_argument("pred_feature", type=str) p...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/predict_nn.py
from pathlib import Path import argparse import numpy as np parser = argparse.ArgumentParser() parser.add_argument("--input", help="input filename", type=str, nargs="+") parser.add_argument("--output", help="output result file", default=None) args = parser.parse_args() scores = {} for path in args.input: with o...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/report_results.py
import copy import json import numpy as np import fire def evaluate_annotation(key2refs, scorer): if scorer.method() == "Bleu": scores = np.array([ 0.0 for n in range(4) ]) else: scores = 0 num_cap_per_audio = len(next(iter(key2refs.values()))) for i in range(num_cap_per_audio): ...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/eval_round_robin.py
import os import sys import copy import pickle import numpy as np import pandas as pd import fire sys.path.append(os.getcwd()) def coco_score(refs, pred, scorer): if scorer.method() == "Bleu": scores = np.array([ 0.0 for n in range(4) ]) else: scores = 0 num_cap_per_audio = len(refs[list...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/model_eval_diff.py
# coding=utf-8 #!/usr/bin/env python3 import numpy as np import pandas as pd import torch import gensim from gensim.models import Word2Vec from tqdm import tqdm import fire import sys import os sys.path.append(os.getcwd()) from utils.build_vocab import Vocabulary def create_embedding(vocab_file: str, ...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/word2vec/create_word_embedding.py
# coding=utf-8 #!/usr/bin/env python3 import numpy as np import pandas as pd import torch from gensim.models import FastText from tqdm import tqdm import fire import sys import os sys.path.append(os.getcwd()) from utils.build_vocab import Vocabulary def create_embedding(caption_file: str, vocab_...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/fasttext/create_word_embedding.py
import pickle import fire import numpy as np import pandas as pd from tqdm import tqdm class EmbeddingExtractor(object): def extract_sentbert(self, caption_file: str, output: str, dev: bool=True, zh: bool=False): from sentence_transformers import SentenceTransformer lang2model = { "zh...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/bert/create_sent_embedding.py
# -*- coding: utf-8 -*- import sys import os from bert_serving.client import BertClient import numpy as np from tqdm import tqdm import fire import torch sys.path.append(os.getcwd()) from utils.build_vocab import Vocabulary def main(vocab_file: str, output: str, server_hostname: str): client = BertClient(ip=ser...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/utils/bert/create_word_embedding.py
# -*- coding: utf-8 -*- import math from functools import partial import numpy as np import torch import torch.nn as nn from .utils import generate_length_mask, init, PositionalEncoding class BaseDecoder(nn.Module): """ Take word/audio embeddings and output the next word probs Base decoder, cannot be c...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/models/decoder.py
# -*- coding: utf-8 -*- import random import torch import torch.nn as nn from .base_model import CaptionModel from .utils import repeat_tensor import audio_to_text.captioning.models.decoder class TransformerModel(CaptionModel): def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs): if not...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/models/transformer_model.py
from .base_model import * from .transformer_model import *
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/models/__init__.py
# -*- coding: utf-8 -*- import math import copy import torch import torch.nn as nn import torch.nn.functional as F from torchaudio import transforms from torchlibrosa.augmentation import SpecAugmentation from .utils import mean_with_lens, max_with_lens, \ init, pack_wrapper, generate_length_mask, PositionalEncod...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/models/encoder.py
# -*- coding: utf-8 -*- from typing import Dict import torch import torch.nn as nn from .utils import mean_with_lens, repeat_tensor class CaptionModel(nn.Module): """ Encoder-decoder captioning model. """ pad_idx = 0 start_idx = 1 end_idx = 2 max_length = 20 def __init__(self, enc...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/models/base_model.py
import math import numpy as np import torch import torch.nn as nn from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence def sort_pack_padded_sequence(input, lengths): sorted_lengths, indices = torch.sort(lengths, descending=True) tmp = pack_padded_sequence(input[indices], ...
EXA-1-master
exa/models/AudioGPT/audio_to_text/captioning/models/utils.py
import librosa import librosa.filters import math import numpy as np import scipy.io.wavfile def load_wav(path): max_length = 32000 * 10 wav = librosa.core.load(path, sr=32000)[0] if len(wav) > max_length: audio = wav[0:max_length] # pad audio to max length, 10s for AudioCaps if len(wav) <...
EXA-1-master
exa/models/AudioGPT/sound_extraction/utils/wav_io.py
import torch import numpy as np import torch.nn.functional as F from torch.autograd import Variable from scipy.signal import get_window import librosa.util as librosa_util from librosa.util import pad_center, tiny # from audio_processing import window_sumsquare def window_sumsquare(window, n_frames, hop_length=512, wi...
EXA-1-master
exa/models/AudioGPT/sound_extraction/utils/stft.py
import torch import numpy as np def add_noise_and_scale(front, noise, snr_l=0, snr_h=0, scale_lower=1.0, scale_upper=1.0): """ :param front: front-head audio, like vocal [samples,channel], will be normlized so any scale will be fine :param noise: noise, [samples,channel], any scale :param snr_l: Option...
EXA-1-master
exa/models/AudioGPT/sound_extraction/utils/create_mixtures.py
import torch import torch.nn as nn class Film(nn.Module): def __init__(self, channels, cond_embedding_dim): super(Film, self).__init__() self.linear = nn.Sequential( nn.Linear(cond_embedding_dim, channels * 2), nn.ReLU(inplace=True), nn.Linear(channels * 2, chann...
EXA-1-master
exa/models/AudioGPT/sound_extraction/model/film.py
from .modules import * import numpy as np class UNetRes_FiLM(nn.Module): def __init__(self, channels, cond_embedding_dim, nsrc=1): super(UNetRes_FiLM, self).__init__() activation = 'relu' momentum = 0.01 self.nsrc = nsrc self.channels = channels self.downsample_rati...
EXA-1-master
exa/models/AudioGPT/sound_extraction/model/resunet_film.py
import torch import torch.nn as nn from transformers import * import warnings warnings.filterwarnings('ignore') # pretrained model name: (model class, model tokenizer, output dimension, token style) MODELS = { 'prajjwal1/bert-mini': (BertModel, BertTokenizer), } class Text_Encoder(nn.Module): def __init__(self...
EXA-1-master
exa/models/AudioGPT/sound_extraction/model/text_encoder.py
import torch import torch.nn as nn import torch.nn.functional as F import math from .film import Film class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, activation, momentum): super(ConvBlock, self).__init__() self.activation = activation padding = (kern...
EXA-1-master
exa/models/AudioGPT/sound_extraction/model/modules.py
import torch import torch.nn as nn import torch.nn.functional as F from .text_encoder import Text_Encoder from .resunet_film import UNetRes_FiLM class LASSNet(nn.Module): def __init__(self, device='cuda'): super(LASSNet, self).__init__() self.text_embedder = Text_Encoder(device) self.UNet =...
EXA-1-master
exa/models/AudioGPT/sound_extraction/model/LASSNet.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import torch as th import torch.nn as nn import torch.nn.functional as F class TimeWarperFunction(th.autograd.Function): ...
EXA-1-master
exa/models/AudioGPT/mono2binaural/src/warping.py
import numpy as np import scipy.linalg from scipy.spatial.transform import Rotation as R import torch as th import torch.nn as nn import torch.nn.functional as F from src.warping import GeometricTimeWarper, MonotoneTimeWarper from src.utils import Net class GeometricWarper(nn.Module): def __init__(self, sampling_...
EXA-1-master
exa/models/AudioGPT/mono2binaural/src/models.py
""" Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ import numpy as np import torch as th #import torchaudio as ta class Net(th.nn.Module): def __init__(self, model_name...
EXA-1-master
exa/models/AudioGPT/mono2binaural/src/utils.py
EXA-1-master
exa/models/AudioGPT/audio_detection/__init__.py
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/__init__.py
import numpy as np import csv sample_rate = 32000 clip_samples = sample_rate * 10 # Audio clips are 10-second # Load label with open('./audio_detection/audio_infer/metadata/class_labels_indices.csv', 'r') as f: reader = csv.reader(f, delimiter=',') lines = list(reader) labels = [] ids = [] # Each labe...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/config.py
import sys class ExceptionHook: instance = None def __call__(self, *args, **kwargs): if self.instance is None: from IPython.core import ultratb self.instance = ultratb.FormattedTB(mode='Plain', color_scheme='Linux', call_pdb=1) return self.instance(*args...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/crash.py
import argparse import csv import os from utilities import create_folder def dcase2017task4(args): """Create black list. Black list is a list of audio ids that will be skipped in training. """ # Augments & parameters workspace = args.workspace # Black list from DCASE 2017 Task 4 t...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/create_black_list.py
import numpy as np import argparse import csv import os import glob import datetime import time import logging import h5py import librosa from utilities import (create_folder, get_filename, create_logging, float32_to_int16, pad_or_truncate, read_metadata) import config def split_unbalanced_csv_to_partial_csvs(a...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/dataset.py
import numpy as np import argparse import csv import os import glob import datetime import time import logging import h5py import librosa from utilities import create_folder, get_sub_filepaths import config def create_indexes(args): """Create indexes a for dataloader to read for training. When users have a ...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/create_indexes.py
import os import sys import numpy as np import argparse import h5py import time import _pickle as cPickle import _pickle import matplotlib.pyplot as plt import csv from sklearn import metrics from utilities import (create_folder, get_filename, d_prime) import config def _load_metrics0(filename, sample_rate, window_s...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/plot_statistics.py
import os import sys import numpy as np import argparse import h5py import time import pickle import matplotlib.pyplot as plt import csv from sklearn import metrics from utilities import (create_folder, get_filename, d_prime) import config def load_statistics(statistics_path): statistics_dict = pickle.load(open(...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/plot_for_paper.py
import os import logging import h5py import soundfile import librosa import numpy as np import pandas as pd from scipy import stats import datetime import pickle def create_folder(fd): if not os.path.exists(fd): os.makedirs(fd) def get_filename(path): path = os.path.realpath(path) ...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/utilities.py
import numpy as np import h5py import csv import time import logging from utilities import int16_to_float32 def read_black_list(black_list_csv): """Read audio names from black list. """ with open(black_list_csv, 'r') as fr: reader = csv.reader(fr) lines = list(reader) black_list_nam...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/utils/data_generator.py
import numpy as np import time import torch import torch.nn as nn def move_data_to_device(x, device): if 'float' in str(x.dtype): x = torch.Tensor(x) elif 'int' in str(x.dtype): x = torch.LongTensor(x) else: return x return x.to(device) def do_mixup(x, mixup_lambda): """...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/pytorch_utils.py
import torch import torch.nn as nn import torch.nn.functional as F from torchlibrosa.stft import Spectrogram, LogmelFilterBank from torchlibrosa.augmentation import SpecAugmentation from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output import os import sys import math import numpy a...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/models.py
import os import sys sys.path.insert(1, os.path.join(sys.path[0], '../utils')) import numpy as np import argparse import librosa import matplotlib.pyplot as plt import torch from utilities import create_folder, get_filename from models import * from pytorch_utils import move_data_to_device import config def audio_tag...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/inference.py
from sklearn import metrics from pytorch_utils import forward class Evaluator(object): def __init__(self, model): """Evaluator. Args: model: object """ self.model = model def evaluate(self, data_loader): """Forward evaluation data and calculate stat...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/evaluate.py
import torch import torch.nn.functional as F def clip_bce(output_dict, target_dict): """Binary crossentropy loss. """ return F.binary_cross_entropy( output_dict['clipwise_output'], target_dict['target']) def get_loss_func(loss_type): if loss_type == 'clip_bce': return clip_bce
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/losses.py
import os import sys sys.path.insert(1, os.path.join(sys.path[0], '../utils')) import numpy as np import argparse import h5py import math import time import logging import matplotlib.pyplot as plt import torch torch.backends.cudnn.benchmark=True torch.manual_seed(0) import torch.nn as nn import torch.nn.functional as ...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/finetune_template.py
import os import sys sys.path.insert(1, os.path.join(sys.path[0], '../utils')) import numpy as np import argparse import time import logging import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.utils.data from utilities import (create_folder, get_filename, creat...
EXA-1-master
exa/models/AudioGPT/audio_detection/audio_infer/pytorch/main.py
# !/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2021/3/9 16:33 # @Author : dongchao yang # @File : train.py from itertools import zip_longest import numpy as np from scipy import ndimage import torch import torch.nn as nn import torch.nn.functional as F import time from torchlibrosa.augmentation import ...
EXA-1-master
exa/models/AudioGPT/audio_detection/target_sound_detection/src/models.py
# !/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2021/3/9 16:33 # @Author : dongchao yang # @File : train.py import collections import sys from loguru import logger from pprint import pformat import numpy as np import pandas as pd import scipy import six import sklearn.preprocessing as pre import torch...
EXA-1-master
exa/models/AudioGPT/audio_detection/target_sound_detection/src/utils.py
from setuptools import setup, find_packages setup( name = 'recurrent-memory-transformer-pytorch', packages = find_packages(exclude=[]), version = '0.2.2', license='MIT', description = 'Recurrent Memory Transformer - Pytorch', author = 'Phil Wang', author_email = 'lucidrains@gmail.com', long_description...
EXA-1-master
exa/models/recurrent-memory-transformer-pytorch/setup.py
import gzip import random import tqdm import numpy as np import torch from torch.optim import Adam from torch.nn import functional as F from torch.utils.data import DataLoader, Dataset from recurrent_memory_transformer_pytorch import RecurrentMemoryTransformer, RecurrentMemoryTransformerWrapper # constants NUM_BATC...
EXA-1-master
exa/models/recurrent-memory-transformer-pytorch/train.py
from recurrent_memory_transformer_pytorch.recurrent_memory_transformer import RecurrentMemoryTransformer, RecurrentMemoryTransformerWrapper
EXA-1-master
exa/models/recurrent-memory-transformer-pytorch/recurrent_memory_transformer_pytorch/__init__.py
from collections import namedtuple from functools import wraps from packaging import version import torch from torch import nn, einsum import torch.nn.functional as F from einops import rearrange # constants Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) # ...
EXA-1-master
exa/models/recurrent-memory-transformer-pytorch/recurrent_memory_transformer_pytorch/attend.py
import math from functools import partial from itertools import zip_longest from contextlib import nullcontext from typing import Optional, List import torch import torch.nn.functional as F from torch import nn, einsum from einops import rearrange, repeat, pack, unpack from recurrent_memory_transformer_pytorch.atten...
EXA-1-master
exa/models/recurrent-memory-transformer-pytorch/recurrent_memory_transformer_pytorch/recurrent_memory_transformer.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. import codecs import os import platform import re from glob import glob import setuptools from setuptools import Extension from setuptools.command.build_ext import build_ext def clean_html(raw_html): cleanr = re.compile("<.*?>") clea...
EXA-1-master
exa/models/mmf-main/setup.py
# Copyright (c) Facebook, Inc. and its affiliates.
EXA-1-master
exa/models/mmf-main/tools/__init__.py
#!/usr/bin/env python # Copyright (c) Facebook, Inc. and its affiliates. import lib as sweep from lib import hyperparam def get_grid(args): max_update = 22000 return [ hyperparam("run_type", "train_val"), hyperparam("config", "projects/visual_bert/configs/vqa2/defaults.yaml"), # hyp...
EXA-1-master
exa/models/mmf-main/tools/sweeps/sweep_visual_bert.py
# Copyright (c) Facebook, Inc. and its affiliates. # Copied from fairseq. Mostly written by @myleott. Adapted accordingly for mmf import datetime import itertools import os import random import shlex import shutil import subprocess from collections import OrderedDict from glob import glob from mmf.utils.general impor...
EXA-1-master
exa/models/mmf-main/tools/sweeps/lib/slurm.py
# Copyright (c) Facebook, Inc. and its affiliates. # Copied from fairseq. Mostly written by @myleott. Adapted accordingly for mmf import argparse import datetime import json import os import socket # if argv is None, we will read from sys.argv (invoke params) def get_args(argv=None): parser = argparse.ArgumentPa...
EXA-1-master
exa/models/mmf-main/tools/sweeps/lib/__init__.py
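Each file_path appears to be relative to its repo_name, so a record can be written back into a directory tree. A rough sketch under the assumption that python_code holds the full, untruncated file contents; the output directory name "restored" is arbitrary.

import os

def write_row(row, out_dir="restored"):
    # Recreate <out_dir>/<repo_name>/<file_path> and fill it with python_code.
    dest = os.path.join(out_dir, row["repo_name"], row["file_path"])
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    with open(dest, "w", encoding="utf-8") as f:
        f.write(row["python_code"] or "")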