text
stringlengths
1
93.6k
mel_input = torch.randn(
(1, 80, 88)).cuda()
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = melgan.inference(mel_input).float() # [B, 1, T]
bias_spec, _ = self.stft.transform(bias_audio.squeeze(0))
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
    """Denoise *audio* by subtracting the precomputed bias spectrogram.

    Args:
        audio: waveform tensor; moved to CUDA and cast to float here.
        strength: scale applied to ``self.bias_spec`` before subtraction.

    Returns:
        The denoised waveform reconstructed via the inverse STFT.
    """
    # Decompose into magnitude and phase; phase is kept untouched.
    spec, angles = self.stft.transform(audio.cuda().float())
    # Remove the scaled bias and clamp negatives introduced by the subtraction.
    cleaned = torch.clamp(spec.cuda() - self.bias_spec * strength, 0.0)
    # Resynthesize with the original phase.
    return self.stft.inverse(cleaned, angles.cuda())
# <FILESEP>
from __future__ import print_function, division
import os
import torch
import pandas as pd
#from skimage import io, transform
import cv2
import numpy as np
import random
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
import pdb
import math
import os
import imgaug.augmenters as iaa
#face_scale = 0.9 #default for test, for training , can be set from [0.8 to 1.0]
# data augment from 'imgaug' --> Add (value=(-40,40), per_channel=True), GammaContrast (gamma=(0.5,1.5))
# Photometric augmentation pipeline (imgaug): a random per-channel additive
# colour shift followed by a random gamma-contrast change, applied per sample.
seq = iaa.Sequential([
iaa.Add(value=(-40,40), per_channel=True), # additive shift in [-40, 40], drawn independently per channel
iaa.GammaContrast(gamma=(0.5,1.5)) # gamma drawn uniformly from [0.5, 1.5]
])
# Tensor
class Cutout(object):
    """Zero out one random square patch, applied with the same mask to the
    RGB, depth, and IR modalities; the label and map entries pass through
    unchanged.
    """

    def __init__(self, length=30):
        # Exclusive upper bound for the randomly drawn patch side length.
        self.length = length

    def __call__(self, sample):
        img = sample['image_x']
        depth = sample['image_x_depth']
        ir = sample['image_x_ir']
        label = sample['spoofing_label']
        map_x1 = sample['map_x1']

        # Inputs are CHW tensors, so the spatial dims sit at indices 1 and 2.
        rows, cols = img.shape[1], img.shape[2]

        # Draw patch centre then side length (same RNG call order as before).
        cy = np.random.randint(rows)
        cx = np.random.randint(cols)
        side = np.random.randint(1, self.length)
        half = side // 2

        # Clip the patch to the image bounds.
        top = np.clip(cy - half, 0, rows)
        bottom = np.clip(cy + half, 0, rows)
        left = np.clip(cx - half, 0, cols)
        right = np.clip(cx + half, 0, cols)

        keep = np.ones((rows, cols), np.float32)
        keep[top:bottom, left:right] = 0.
        keep = torch.from_numpy(keep).expand_as(img)

        # In-place multiply so the caller's tensors are masked too,
        # matching the original behaviour.
        img *= keep
        depth *= keep
        ir *= keep
        return {'image_x': img, 'image_x_depth': depth, 'image_x_ir': ir,
                'spoofing_label': label, 'map_x1': map_x1}
class Normaliztion(object):
    """
    same as mxnet, normalize into [-1, 1]
    image = (image - 127.5)/128
    """

    def __call__(self, sample):
        # Apply the identical affine rescale to each image modality;
        # the label and map entries are passed through untouched.
        rescaled = {key: (sample[key] - 127.5) / 128
                    for key in ('image_x', 'image_x_depth', 'image_x_ir')}
        return {'image_x': rescaled['image_x'],
                'image_x_depth': rescaled['image_x_depth'],
                'image_x_ir': rescaled['image_x_ir'],
                'spoofing_label': sample['spoofing_label'],
                'map_x1': sample['map_x1']}
class RandomHorizontalFlip(object):
"""Horizontally flip the given Image randomly with a probability of 0.5."""
def __call__(self, sample):
image_x, image_x_depth, image_x_ir, spoofing_label, map_x1 = sample['image_x'],sample['image_x_depth'],sample['image_x_ir'],sample['spoofing_label'],sample['map_x1']
new_image_x = np.zeros((224, 224, 3))
new_image_x_depth = np.zeros((224, 224, 3))
new_image_x_ir = np.zeros((224, 224, 3))