repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/general.py | import torch
import os
import subprocess
import logging
import yaml
import logging.config
import inspect
from os import walk
import numpy as np
import coloredlogs
import multiprocessing as mp
from threading import Thread
from queue import Queue
from collections import abc
import cv2
from torch import nn
# import kornia
def get_member(model, name):
    """Return attribute `name` from `model`, unwrapping nn.DataParallel first."""
    target = model.module if isinstance(model, nn.DataParallel) else model
    return getattr(target, name)
def convert_flow_2d_to_3d(flow):
    """Append the flow magnitude as a third channel and unit-normalize the 2d part.

    `flow` is a (2, H, W) tensor; returns a (3, H, W) tensor whose first two
    channels are the direction (zero where the flow vanishes) and whose last
    channel is the per-pixel magnitude.
    """
    magnitude = torch.sqrt((flow * flow).sum(dim=0, keepdim=True))
    # guard against division by zero where the flow is exactly zero
    denom = magnitude.clone()
    denom[denom == 0.0] = 1.0
    direction = flow / denom
    return torch.cat([direction, magnitude], dim=0)
def convert_flow_2d_to_3d_batch(flows):
    """Apply convert_flow_2d_to_3d to every flow in the batch.

    `flows` is (B, 2, H, W); returns (B, 3, H, W).
    """
    return torch.stack([convert_flow_2d_to_3d(single) for single in flows], dim=0)
def get_flow_gradients(flow, device=None):
    """torch in, torch out.

    Computes first- and second-order Sobel gradients of a (C, H, W) flow field
    and reduces each over the channel dimension with an L2 norm.
    Returns (d1_x, d1_y, d2_x, d2_y), each of shape (H, W).
    NOTE: `device` is unused and kept only for interface compatibility; the
    kernels follow the device of `flow`.
    """
    x = flow[:, None]  # (C, 1, H, W): convolve each channel independently
    base = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
    kernel_y = torch.tensor(base, dtype=torch.float32).unsqueeze(0)
    kernel_x = torch.transpose(kernel_y, 1, 2)
    kernel_x = kernel_x.expand((1, 1, 3, 3))
    kernel_y = kernel_y.expand((1, 1, 3, 3))
    if x.is_cuda:
        kernel_x = kernel_x.to(x.get_device())
        kernel_y = kernel_y.to(x.get_device())

    def _conv(inp, kernel):
        # same-size convolution (stride 1, zero padding 1)
        return torch.nn.functional.conv2d(inp, kernel, stride=1, padding=1)

    d1_x = _conv(x, kernel_x)
    d2_x = _conv(d1_x, kernel_x)
    d1_y = _conv(x, kernel_y)
    d2_y = _conv(d1_y, kernel_y)
    # drop the singleton conv channel, then take the channel-wise L2 norm
    reduced = []
    for grad in (d1_x, d1_y, d2_x, d2_y):
        grad = grad.squeeze()
        reduced.append(torch.sqrt(torch.sum(grad ** 2, dim=0)))
    return reduced[0], reduced[1], reduced[2], reduced[3]
def get_flow_gradients_batch(flows):
    """Stack the four Sobel-gradient maps of every flow in the batch.

    `flows` iterates over per-sample flow fields; the four gradient maps of
    each sample are stacked along a new leading dim, then all samples are
    stacked into the returned tensor.
    """
    per_sample = []
    for single_flow in flows:
        grads = get_flow_gradients(single_flow)
        per_sample.append(torch.stack(list(grads), dim=0).squeeze(dim=0))
    return torch.stack(per_sample, dim=0).squeeze(dim=0)
class LoggingParent:
    """Mixin that equips subclasses with a `self.logger` whose name is the
    subclass' dotted module path relative to the project root (the closest
    ancestor directory containing a ``.gitignore`` file)."""

    def __init__(self):
        super(LoggingParent, self).__init__()
        # find project root: walk upwards from the directory of the file that
        # defines the concrete subclass until a ".gitignore" is found
        mypath = inspect.getfile(self.__class__)
        mypath = "/".join(mypath.split("/")[:-1])
        found = False
        while mypath != "" and not found:
            f = []
            # walk() yields (dirpath, dirnames, filenames); break after the
            # first tuple so only the top-level files of `mypath` are listed
            for (dirpath, dirnames, filenames) in walk(mypath):
                f.extend(filenames)
                break
            if ".gitignore" in f:
                found = True
                continue
            mypath = "/".join(mypath.split("/")[:-1])
        project_root = mypath + "/"
        # Put it together: "pkg.module" from the file path plus the class name
        file = inspect.getfile(self.__class__).replace(project_root, "").replace("/", ".").split(".py")[0]
        cls = str(self.__class__)[8:-2]
        cls = str(cls).replace("__main__.", "").split(".")[-1]
        self.logger = get_logger(f"{file}.{cls}")
def get_gpu_id_with_lowest_memory(index=0, target_gpus:list=None):
    """Return a torch.device for the GPU with the most free memory (i.e. the
    lowest current memory usage) according to nvidia-smi.

    :param index: 0 selects the GPU with the most free memory, 1 the
        runner-up, and so on.
    :param target_gpus: optional whitelist of GPU ids to consider.
    NOTE(review): if `target_gpus` filters out every id, `indices[-index-1]`
    raises IndexError — confirm callers always pass valid ids.
    """
    # get info from nvidia-smi (one free-memory value per line, in MiB)
    result = subprocess.check_output(
        [
            'nvidia-smi', '--query-gpu=memory.free',
            '--format=csv,nounits,noheader'
        ], encoding='utf-8')
    gpu_memory = [int(x) for x in result.strip().split('\n')]
    # get the one with the lowest usage: argsort is ascending in free memory,
    # so the entries at the end have the most free memory
    if target_gpus is None:
        indices = np.argsort(gpu_memory)
    else:
        indices = [i for i in np.argsort(gpu_memory) if i in target_gpus]
    return torch.device(f"cuda:{indices[-index-1]}")
# module-level guard so the yaml logging configuration is loaded only once
iuhihfie_logger_loaded = False


def get_logger(name):
    """Return a colored logger named `name`, lazily loading the dictConfig
    from utils/logging.yaml on first use."""
    # setup logging
    global iuhihfie_logger_loaded
    if not iuhihfie_logger_loaded:
        with open(f'{os.path.dirname(os.path.abspath(__file__))}/logging.yaml', 'r') as f:
            log_cfg = yaml.load(f.read(), Loader=yaml.FullLoader)
        logging.config.dictConfig(log_cfg)
        iuhihfie_logger_loaded = True
    logger = logging.getLogger(name)
    coloredlogs.install(logger=logger, level="DEBUG")
    return logger
def save_model_to_disk(path, models, epoch):
    """Save the state dict of every model in `models` under directory `path`
    as ``model_{i}-epoch{epoch}``.

    :param path: target directory; created (including parents) if missing.
    :param models: iterable of torch.nn.Module instances.
    :param epoch: epoch number embedded in each checkpoint file name.
    """
    # create the directory once, up front (exist_ok avoids the check-then-create race)
    os.makedirs(path, exist_ok=True)
    for i, model in enumerate(models):
        # os.path.join fixes the original plain string concatenation, which
        # silently dropped the separator when `path` had no trailing slash
        # (writing e.g. "ckptsmodel_0-epoch1" next to the directory)
        torch.save(model.state_dict(), os.path.join(path, f"model_{i}-epoch{epoch}"))
def _do_parallel_data_prefetch(func, Q, data, idx):
# create dummy dataset instance
# run prefetching
res = func(data)
Q.put([idx, res])
Q.put("Done")
def parallel_data_prefetch(
        func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True
):
    """Apply `func` to `data` split into chunks, using `n_proc` parallel workers.

    :param func: callable applied to each chunk of `data`.
    :param data: np.ndarray or iterable; for a dict only the values are used.
    :param n_proc: number of worker processes (or threads).
    :param target_data_type: "ndarray" or "list" — controls how `data` is chunked.
    :param cpu_intensive: True -> multiprocessing.Process, False -> threading.Thread.
    :return: np.ndarray concatenating the chunk results in chunk order.
    """
    if target_data_type not in ["ndarray", "list"]:
        raise ValueError(
            "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray."
        )
    if isinstance(data, np.ndarray) and target_data_type == "list":
        raise ValueError("list expected but function got ndarray.")
    elif isinstance(data, abc.Iterable):
        if isinstance(data, dict):
            print(
                f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.'
            )
            data = list(data.values())
        if target_data_type == "ndarray":
            data = np.asarray(data)
        else:
            data = list(data)
    else:
        raise TypeError(
            f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}."
        )
    # processes for CPU-bound work, threads for I/O-bound work
    if cpu_intensive:
        Q = mp.Queue(1000)
        proc = mp.Process
    else:
        Q = Queue(1000)
        proc = Thread
    # spawn processes
    if target_data_type == "ndarray":
        arguments = [
            [func, Q, part, i]
            for i, part in enumerate(np.array_split(data, n_proc))
        ]
    else:
        # NOTE(review): this list chunking can yield a number of chunks that
        # differs from n_proc; the loop below indexes arguments[i] for
        # i < n_proc and would raise IndexError if fewer chunks exist — confirm
        # callers only pass n_proc <= len(data).
        step = (
            int(len(data) / n_proc + 1)
            if len(data) % n_proc != 0
            else int(len(data) / n_proc)
        )
        arguments = [
            [func, Q, part, i]
            for i, part in enumerate(
                [data[i: i + step] for i in range(0, len(data), step)]
            )
        ]
    processes = []
    for i in range(n_proc):
        p = proc(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    # start processes
    print(f"Start prefetching...")
    import time
    start = time.time()
    gather_res = [[] for _ in range(n_proc)]
    try:
        for p in processes:
            p.start()
        k = 0
        # drain the queue until every worker has reported its "Done" sentinel;
        # results are stored by chunk index, so output order is deterministic
        while k < n_proc:
            # get result
            res = Q.get()
            if res == "Done":
                k += 1
            else:
                gather_res[res[0]] = res[1]
    except Exception as e:
        print("Exception: ", e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
        print(f"Prefetching complete. [{time.time() - start} sec.]")
    if not isinstance(gather_res[0], np.ndarray):
        # chunk results are lists: convert each before concatenating
        return np.concatenate([np.asarray(r) for r in gather_res], axis=0)
    # order outputs
    return np.concatenate(gather_res, axis=0)
def linear_var(
        act_it, start_it, end_it, start_val, end_val, clip_min, clip_max
):
    """Linearly interpolate a value over iterations.

    Maps `act_it` from the range [start_it, end_it] onto [start_val, end_val]
    and clips the result to [clip_min, clip_max].
    """
    slope = float(end_val - start_val) / (end_it - start_it)
    interpolated = slope * (act_it - start_it) + start_val
    return np.clip(interpolated, a_min=clip_min, a_max=clip_max)
def get_patches(seq_batch, weights, config, fg_value, logger=None):
    """
    Crop each video to the bounding box of its foreground weight map and
    resize the crop back to the configured spatial size.

    :param seq_batch: Batch of videos
    :param weights: batch of flow weights for the videos
    :param config: config, containing spatial_size
    :param fg_value: foreground value of the weight map
    :param logger: optional logger used to report skipped samples
    :return: tensor with the cropped-and-resized videos stacked over the batch
    """
    import kornia
    weights_as_bool = torch.eq(weights, fg_value)
    cropped = []
    for vid, weight in zip(seq_batch, weights_as_bool):
        vid_old = vid  # keep the unmodified video as fallback on failure
        weight_ids = torch.nonzero(weight, as_tuple=True)
        try:
            # bounding box over all foreground locations
            min_y = weight_ids[0].min()
            max_y = weight_ids[0].max()
            min_x = weight_ids[1].min()
            max_x = weight_ids[1].max()
            vid = vid[..., min_y:max_y, min_x:max_x]
            if len(vid.shape) < 4:
                # single image: add a batch dim for kornia, then drop it again
                data_4d = vid[None, ...]
                vid = kornia.transform.resize(data_4d, config["spatial_size"])
                cropped.append(vid.squeeze(0))
            else:
                vid = kornia.transform.resize(vid, config["spatial_size"])
                cropped.append(vid)
        except Exception as e:
            # e.g. an all-background weight map makes min()/max() fail on an
            # empty tensor; in that case the sample is kept uncropped
            if logger is None:
                print(e)
            else:
                logger.warn(f'Catched the following exception in "get_patches": {e.__class__.__name__}: {e}. Skip patching this sample...')
            cropped.append(vid_old)
    return torch.stack(cropped, dim=0)
if __name__ == "__main__":
print(get_gpu_id_with_lowest_memory())
| 10,061 | 33.108475 | 139 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/utils/eval_models.py | import argparse
from os import path
import yaml
import os
from experiments import select_experiment
def create_dir_structure(model_name, experiment, base_dir):
    """Build the per-model directory layout for an experiment.

    Returns a dict mapping each of "ckpt"/"config"/"generated"/"log" to
    ``base_dir/experiment/<subdir>/model_name``; if the DATAPATH environment
    variable is set, it is prepended (plain string concatenation) to every path.
    """
    layout = {}
    for sub in ["ckpt", "config", "generated", "log"]:
        layout[sub] = path.join(base_dir, experiment, sub, model_name)
    if "DATAPATH" in os.environ:
        prefix = os.environ["DATAPATH"]
        layout = {sub: prefix + layout[sub] for sub in layout}
    return layout
def load_parameters(model_name, exp, base_dir):
    """Load the saved yaml config of a model.

    Returns (config_dict, dir_structure); raises FileNotFoundError when no
    saved config exists for the model.
    """
    dir_structure = create_dir_structure(model_name, exp, base_dir)
    saved_config = path.join(dir_structure["config"], "config.yaml")
    # guard clause: a restarted model must have a persisted config
    if not path.isfile(saved_config):
        raise FileNotFoundError("No saved config file found but model is intended to be restarted. Aborting....")
    with open(saved_config, "r") as f:
        cdict = yaml.load(f, Loader=yaml.FullLoader)
    return cdict, dir_structure
if __name__ == '__main__':
    # CLI entry point: evaluate every model listed in config/model_names.txt
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--experiment", type=str,
                        default="fixed_length_model",
                        help="Source directory.")
    parser.add_argument('-b', '--base_dir', required=True,
                        type=str, help='the base directory, where all logs, configs, checkpoints and evaluation results are stored.')
    parser.add_argument("--gpu", type=int, required=True, help="The target device.")
    parser.add_argument("--mode", default="metrics", type=str, choices=["metrics", "fvd"],
                        help="The mode in which the test-method should be executed.")
    parser.add_argument("--metrics_on_patches", default=False, action="store_true",
                        help="Whether to run evaluation on patches (if available or not).")
    parser.add_argument("--best_ckpt", default=False, action="store_true",
                        help="Whether to use the best ckpt as measured by LPIPS (otherwise, latest_ckpt is used)")
    args = parser.parse_args()
    # lines starting with "#" in the model list are treated as comments
    with open("config/model_names.txt", "r") as f:
        model_names = f.readlines()
    model_names = [m for m in model_names if not m.startswith("#")]
    gpu = args.gpu
    for model in model_names:
        model = model.rstrip()
        print(f"Evaluate model : {model}")
        cdict, dirs = load_parameters(model, args.experiment, args.base_dir)
        # override the persisted config with the CLI-selected test options
        cdict["testing"].update({"mode": args.mode})
        cdict["general"]["mode"] = "test"
        cdict["testing"].update({"best_ckpt": args.best_ckpt})
        cdict["testing"]["metrics_on_patches"] = args.metrics_on_patches
        cdict["general"]["restart"] = True
        experiment = select_experiment(cdict, dirs, args.gpu)
        # a missing checkpoint/file only skips this model instead of aborting the sweep
        try:
            experiment.test()
        except FileNotFoundError as e:
            print(e)
| 2,819 | 39.285714 | 133 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/flow_dataset.py | from os import path
import numpy as np
import pickle
from copy import deepcopy
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from torchvision import transforms as tt
from tqdm import tqdm
import cv2
from natsort import natsorted
import os
from glob import glob
from utils.general import parallel_data_prefetch, LoggingParent
from data.helper_functions import preprocess_image
from data.base_dataset import BaseDataset
class PlantDataset(BaseDataset):
    """Flow-video dataset for the "Plants" recordings.

    Loads a pickled metadata dict (image paths, flow paths, video/frame/object
    ids), builds a train/test split and — depending on the config — prefetches
    flow maps and images into RAM.  Subclasses customize fixed constants via
    `_set_instance_specific_values` and the split strategy via `_make_split`.
    """

    def __init__(self, transforms, datakeys, config, train=True, google_imgs=False, n_ref_frames=None):
        # NOTE(review): `n_ref_frames` is accepted but never used here; the
        # value is read from config["n_ref_frames"] below — confirm intended.
        self.excluded_objects = config["excluded_objects"] if "excluded_objects" in config else []
        super().__init__(transforms, datakeys, config, train=train)
        self.logger.info(f"Initializing {self.__class__.__name__}.")
        # NOTE(review): the same condition is tested twice, and for
        # spatial_size > 256 neither flag is assigned, so later accesses to
        # self.flow_in_ram / self.imgs_in_ram would raise AttributeError
        # unless the base class defines them — confirm.
        if self.config["spatial_size"][0] <= 256:
            self.flow_in_ram = self.config["flow_in_ram"] if "flow_in_ram" in self.config else False
        if self.config["spatial_size"][0] <= 256:
            self.imgs_in_ram = self.config["imgs_in_ram"] if "imgs_in_ram" in self.config else False
        # set instace specific fixed values which shall not be parameters from yaml
        self._set_instance_specific_values()
        self.subsample_step = config["subsample_step"] if "subsample_step" in config else self.subsample_step
        self.logger.info(f'Subsample step of {self.__class__.__name__} is {self.subsample_step}.')
        filt_msg = "enabled" if self.filter_flow else "disabled"
        self.logger.info(f"Flow filtering is {filt_msg} in {self.__class__.__name__}!")
        self.logger.info(f"Valid lag of {self.__class__.__name__} is {self.valid_lags[0]}")
        # load data
        metafile_path = path.join(self.datapath, f"{self.metafilename}.p")
        with open(metafile_path, "rb") as handle:
            self.data = pickle.load(handle)
        if path.isfile(path.join(self.datapath, "dataset_stats.p")) and self.normalize_flows:
            with open(path.join(self.datapath, "dataset_stats.p"), "rb") as norm_file:
                self.flow_norms = pickle.load(norm_file)
        # choose filter procedure
        # frame offsets are encoded in the flow file names as "..._<start>_<end>"
        available_frame_nrs = np.asarray([int(p.split("/")[-1].split(".")[0].split("_")[-1]) - int(p.split("/")[-1].split(".")[0].split("_")[-2]) for p in self.data["flow_paths"][0]])
        # filter invalid flow_paths
        self.data["flow_paths"] = [p for p in self.data["flow_paths"] if len(p) == len(available_frame_nrs)]
        self.filter_proc = self.config["filter"] if "filter" in self.config else "all"
        # remove invalid video
        valid_ids = np.logical_not(np.char.startswith(self.data["img_path"], "VID_0_3_1024x1024"))
        # set flow paths in right order after reading in the data
        if "max_fid" not in self.data:
            self.data["flow_paths"] = [natsorted(d) for d in self.data["flow_paths"]]
        # make absolute image and flow paths
        self.data["img_path"] = [
            path.join(self.datapath, p if not p.startswith("/") else p[1:]) for p in self.data["img_path"]
        ]
        self.data["flow_paths"] = [
            [path.join(self.datapath, f if not f.startswith("/") else f[1:]) for f in fs]
            for fs in self.data["flow_paths"]
        ]
        # convert to numpy array
        self.data = {key: np.asarray(self.data[key])[valid_ids] for key in self.data}
        # if max fid is not predefined, the videos, the dataset consists of are sufficiently long, such that it doesn't make much of a difference,
        # if some frames at the end are skipped, therefore, we set the last valid fid (which is indicated by "max_fid") to the maximum fid
        # in the respective sequence
        if "max_fid" not in self.data:
            available_frame_nrs = np.asarray([int(p.split("/")[-1].split(".")[0].split("_")[-1]) - int(p.split("/")[-1].split(".")[0].split("_")[-2]) for p in self.data["flow_paths"][0]])
            # NOTE(review): np.int / np.float are removed in numpy >= 1.24;
            # these dtypes should eventually become int / float. Left as-is.
            self.data.update({"max_fid": np.zeros((np.asarray(self.data["fid"]).shape[0], max(len(available_frame_nrs), self.valid_lags[0] + 1)), dtype=np.int)})
            for vid in np.unique(self.data["vid"]):
                self.data["max_fid"][self.data["vid"] == vid] = np.amax(self.data["fid"][self.data["vid"] == vid])
        if not self.var_sequence_length and ("poke" in self.datakeys or "flow" in self.datakeys) and not self.normalize_flows:
            # reset valid_lags, such that always the right flow which corresponds to the respective sequence length, is chosen
            if not self.__class__.__name__ == "Human36mDataset":
                available_frame_nrs = np.asarray([int(p.split("/")[-1].split(".")[0].split("_")[-1]) - int(p.split("/")[-1].split(".")[0].split("_")[-2]) for p in self.data["flow_paths"][0]])
                if "n_ref_frames" not in self.config:
                    assert self.max_frames * self.subsample_step in available_frame_nrs
                    right_lag = int(np.argwhere(available_frame_nrs == self.max_frames * self.subsample_step))
                    self.logger.info(f'Last frames of sequence serves as target frame.')
                else:
                    self.logger.info(f'Number of frames in between target and start frames is {self.config["n_ref_frames"]}')
                    assert self.config["n_ref_frames"] * self.subsample_step in available_frame_nrs
                    right_lag = int(np.argwhere(available_frame_nrs == self.config["n_ref_frames"] * self.subsample_step))
                self.valid_lags = [right_lag]
            else:
                assert self.max_frames == 10
                assert self.subsample_step in [1, 2]
                self.valid_lags = [0] if self.subsample_step == 1 else [1]
            self.logger.info(f"Dataset is run in fixed length mode, valid lags are {self.valid_lags}.")
            filt_msg = "enabled" if self.filter_flow else "disabled"
            self.logger.info(f"Flow filtering is {filt_msg} in {self.__class__.__name__}!")
            self.logger.info(f"Valid lag of {self.__class__.__name__} is {self.valid_lags[0]}")
        filt_msg = "enabled" if self.obj_weighting else "disabled"
        self.logger.info(f"Object weighting is {filt_msg} in {self.__class__.__name__}!")
        filt_msg = "enabled" if self.flow_weights else "disabled"
        self.logger.info(f"Patch weighting is {filt_msg} in {self.__class__.__name__}!")
        filt_msg = "enabled" if self.use_flow_for_weights else "disabled"
        self.logger.info(f"Flow patch extraction is {filt_msg} in {self.__class__.__name__}!")
        if self.filter_proc == "action":
            self.data = {key: self.data[key][self.data["action_id"] == 2] for key in self.data}
        elif self.filter_proc == "pose":
            self.data = {key: self.data[key][self.data["action_id"] == 1] for key in self.data}
        # on this point, the raw data is parsed and can be processed further
        # exclude invalid object ids from data
        self.logger.info(f"Excluding the following, user-defined object ids: {self.excluded_objects} from dataloading.")
        kept_ids = np.nonzero(np.logical_not(np.isin(self.data["object_id"], self.excluded_objects)))[0]
        self.data = {key: self.data[key][kept_ids] for key in self.data}
        self.split = self.config["split"]
        split_data, train_indices, test_indices = self._make_split(self.data)
        self.datadict = (
            split_data["train"] if self.train else split_data["test"]
        )
        msg = "train" if self.train else "test"
        vids, start_ids = np.unique(self.datadict["vid"], return_index=True)
        # get start and end ids per sequence
        self.eids_per_seq = {vid: np.amax(np.flatnonzero(self.datadict["vid"] == vid)) for vid in vids}
        seids = np.asarray([self.eids_per_seq[self.datadict["vid"][i]] for i in range(self.datadict["img_path"].shape[0])], dtype=np.int)
        self.datadict.update({"seq_end_id": seids})
        self.sids_per_seq = {vid: i for vid, i in zip(vids, start_ids)}
        # map each sequence length to its lower flow-magnitude threshold
        self.seq_len_T_chunk = {l: c for l, c in enumerate(np.linspace(0, self.flow_cutoff, self.max_frames, endpoint=False))}
        # add last chunk
        self.seq_len_T_chunk.update({self.max_frames: self.flow_cutoff})
        if self.var_sequence_length:
            # per sequence length: sample ids whose flow range and remaining
            # frames support a sequence of that length
            if "flow_range" in self.datadict.keys():
                self.ids_per_seq_len = {length: np.flatnonzero(np.logical_and(np.logical_and(self.datadict["flow_range"][:, 1, self.valid_lags[0]] > self.seq_len_T_chunk[length],
                                                                                             np.less_equal(np.arange(self.datadict["img_path"].shape[0]) +
                                                                                                           (self.min_frames + length) * self.subsample_step + 1,
                                                                                                           self.datadict["seq_end_id"])),
                                                                              np.less_equal(self.datadict["fid"], self.datadict["max_fid"][:, self.valid_lags[0]])))
                                        for length in np.arange(self.max_frames)}
            else:
                self.ids_per_seq_len = {length: np.flatnonzero(np.less_equal(self.datadict["fid"], self.datadict["max_fid"][:, self.valid_lags[0]])) for length in np.arange(self.max_frames)}
            # inverse-frequency object weights per sequence length
            # NOTE(review): self.object_weights_per_seq_len is not initialized
            # in this class — presumably created by BaseDataset; confirm.
            for length in self.ids_per_seq_len:
                actual_ids = self.ids_per_seq_len[length]
                oids, counts_per_obj = np.unique(self.datadict["object_id"][actual_ids], return_counts=True)
                weights = np.zeros_like(actual_ids, dtype=np.float)
                for oid, c in zip(oids, counts_per_obj):
                    weights[self.datadict["object_id"][actual_ids] == oid] = 1. / (c * oids.shape[0])
                self.object_weights_per_seq_len.update({length: weights})
        # global inverse-frequency weights over all samples
        obj_ids, obj_counts = np.unique(self.datadict["object_id"], return_counts=True)
        weights = np.zeros_like(self.datadict["object_id"], dtype=np.float)
        for (oid, c) in zip(obj_ids, obj_counts):
            weights[self.datadict["object_id"] == oid] = 1. / c
        weights = weights / obj_ids.shape[0]
        self.datadict.update({"weights": weights})
        if self.flow_in_ram:
            self.logger.warn(f"Load flow maps in RAM... please make sure to have enough capacity there.")
            assert len(self.valid_lags) == 1
            self.loaded_flows = parallel_data_prefetch(self._read_flows, self.datadict["flow_paths"][:, self.valid_lags[0]], n_proc=72, cpu_intensive=True)
            assert self.loaded_flows.shape[0] == self.datadict["img_path"].shape[0]
        if self.imgs_in_ram:
            self.logger.warn(f"Load images in RAM... please make sure to have enough capacity there.")
            self.loaded_imgs = parallel_data_prefetch(self._read_imgs, self.datadict["img_path"], n_proc=72, cpu_intensive=True)
            assert self.loaded_imgs.shape[0] == self.datadict["img_path"].shape[0]
        if google_imgs:
            # replace the image paths by a flat folder of downloaded images
            img_paths = [p for p in glob(path.join(self.datapath, "google_images", "*")) if path.isfile(p) and any(map(lambda x: p.endswith(x), ["jpg", "jpeg", "png"]))]
            self.datadict["img_path"] = np.asarray(img_paths)
            self.logger.info("Use images from Google.")
        msg = "Flow normalization enabled!" if self.normalize_flows else "Flow normalization disabled!"
        self.logger.info(
            f'Initialized {self.__class__.__name__} in "{msg}"-mode. Dataset consists of {self.__len__()} samples. ' + msg
        )

    def _set_instance_specific_values(self):
        """Fixed, non-configurable constants for the Plants dataset."""
        # flow cutoff of 0.4 seems to be a good heuristic for Plants
        self.valid_lags = [0]
        self.flow_cutoff = 0.4
        self.extended_annotations = False
        self.subsample_step = 2
        self.min_frames = 5
        self.obj_weighting = True
        # object id 8 is always excluded for this dataset
        if not 8 in self.excluded_objects:
            self.excluded_objects.append(8)
        self.metafilename = "meta"
        # self.metafilename = 'test_codeprep_metadata'

    def _read_flows(self, data):
        """Load and resize the flow maps at the given paths (RAM prefetch).

        Returns all flows concatenated along axis 0.
        NOTE(review): on a failed load the string "None" is appended, which
        would break the final np.concatenate — confirm failures cannot occur
        in practice.
        """
        read_flows = []
        flow_paths = data

        def proc_flow(flow):
            # resize to the configured size and rescale the vectors by the
            # same spatial factor so magnitudes stay consistent
            org_shape = float(flow.shape[-1])
            dsize = None
            if "spatial_size" in self.config:
                dsize = self.config["spatial_size"]
            elif "resize_factor" in self.config:
                dsize = (
                    int(float(flow.shape[1]) / self.config["resize_factor"]),
                    int(float(flow.shape[2]) / self.config["resize_factor"]),
                )
            flow = F.interpolate(
                torch.from_numpy(flow).unsqueeze(0), size=dsize, mode="bilinear", align_corners=True
            ).numpy()
            flow = flow / (org_shape / dsize[0])
            return flow

        for i, flow_path in enumerate(tqdm(flow_paths)):
            try:
                f = np.load(flow_path)
                f = proc_flow(f)
            except ValueError:
                # retry with pickle enabled (object arrays)
                try:
                    f = np.load(flow_path, allow_pickle=True)
                    f = proc_flow(f)
                except Exception as ex:
                    self.logger.error(ex)
                    read_flows.append("None")
                    continue
            except:
                self.logger.error("Fallback error ocurred. Append None and continue")
                read_flows.append("None")
                continue
            read_flows.append(f)
        return np.concatenate(read_flows, axis=0)

    def _read_imgs(self, imgs):
        """Load, preprocess and resize the images at the given paths (RAM prefetch)."""
        read_imgs = []
        for img_path in tqdm(imgs):
            img = cv2.imread(img_path)
            # image is read in BGR
            img = preprocess_image(img, swap_channels=True)
            # NOTE(review): cv2.resize's third positional argument is `dst`,
            # not `interpolation` — cv2.INTER_LINEAR is passed as dst here;
            # interpolation=cv2.INTER_LINEAR was probably intended. Left as-is.
            img = cv2.resize(
                img, self.config["spatial_size"], cv2.INTER_LINEAR
            )
            read_imgs.append(img)
        return read_imgs

    def _make_split(self, data):
        """Return (split_data, train_indices, test_indices).

        "videos" holds back whole videos for testing; any other split value
        divides every video 80/20 along time.
        """
        vids = np.unique(self.data["vid"])
        split_data = {"train": {}, "test": {}}
        if self.split == "videos":
            # split such that some videos are held back for testing
            self.logger.info("Splitting data after videos")
            shuffled_vids = deepcopy(vids)
            np.random.shuffle(shuffled_vids)
            train_vids = shuffled_vids[: int(0.8 * shuffled_vids.shape[0])]
            train_indices = np.nonzero(np.isin(data["vid"], train_vids))[0]
            # NOTE(review): logical_not is applied to an array of *indices*,
            # not to a boolean mask — test_indices therefore only selects
            # positions where the index value equals 0. This looks like a bug;
            # the complement of the membership mask was probably intended.
            test_indices = np.nonzero(np.logical_not(train_indices))[0]
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
        else:
            self.logger.info(f"splitting data across_videos")
            train_indices = np.asarray([], dtype=np.int)
            test_indices = np.asarray([], dtype=np.int)
            for vid in vids:
                indices = np.nonzero(data["vid"] == vid)[0]
                # indices = np.arange(len(tdata["img_path"]))
                # np.random.shuffle(indices)
                # first 80% of every video for training, remainder for testing
                train_indices = np.append(train_indices, indices[: int(0.8 * indices.shape[0])])
                test_indices = np.append(test_indices, indices[int(0.8 * indices.shape[0]):])
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
        return split_data, train_indices, test_indices
class VegetationDataset(PlantDataset):
    """Vegetation dataset; uses a precomputed boolean 'train' column for the split."""

    def _set_instance_specific_values(self):
        """Fixed constants for the Vegetation dataset."""
        self.filter_flow = False
        self.valid_lags = [0]
        self.flow_cutoff = .3
        self.min_frames = 5
        self.subsample_step = 2
        # self.datapath = "/export/data/ablattma/Datasets/vegetation_new/"
        self.metafilename = "meta"
        # NOTE(review): self.datadict is expected to exist at this point
        # (presumably created by BaseDataset) — confirm, since
        # PlantDataset.__init__ only assigns it after the split.
        self.datadict.update({"train": []})
        self.obj_weighting = True
        # set flow_weights to false
        self.flow_weights = False

    def _make_split(self, data):
        """Split according to the precomputed 'train' flags in the metadata."""
        split_data = {"train": {}, "test": {}}
        train_ids = np.flatnonzero(data["train"])
        test_ids = np.flatnonzero(np.logical_not(data["train"]))
        assert np.intersect1d(train_ids, test_ids).size == 0
        split_data["train"] = {
            key: data[key][train_ids] for key in data
        }
        split_data["test"] = {
            key: data[key][test_ids] for key in data
        }
        return split_data, train_ids, test_ids
class TaichiDataset(VegetationDataset):
    """Taichi video dataset; differs from VegetationDataset only in its fixed
    per-instance constants."""

    def _set_instance_specific_values(self):
        # fixed heuristics for Taichi, deliberately not exposed via yaml
        self.metafilename = 'meta'
        self.filter_flow = True
        self.flow_cutoff = .1
        self.valid_lags = [1]
        self.min_frames = 5
        self.subsample_step = 2
        self.flow_width_factor = 5
        self.obj_weighting = False
        self.target_lags = [10, 20]
        self.datadict.update({"train": []})
        # patch weighting defaults to enabled unless the config disables it
        if "flow_weights" in self.config:
            self.flow_weights = self.config["flow_weights"]
        else:
            self.flow_weights = True
class LargeVegetationDataset(VegetationDataset):
    """Large-scale vegetation dataset; same split logic as VegetationDataset,
    different fixed constants and default object exclusions."""

    def _set_instance_specific_values(self):
        # fixed heuristics for this dataset, deliberately not exposed via yaml
        self.metafilename = "meta"
        self.filter_flow = False
        self.flow_cutoff = .1
        self.valid_lags = [0]
        self.min_frames = 5
        self.subsample_step = 2
        self.excluded_objects = [1, 2, 3]
        self.obj_weighting = True
        self.datadict.update({"train": []})
class IperDataset(PlantDataset):
    """iPER human-action dataset; supports several split strategies."""

    def _set_instance_specific_values(self):
        """Fixed constants for iPER."""
        self.filter_flow = True
        self.flow_width_factor = 5
        self.valid_lags = [0]
        # set flow cutoff to 0.45 as this seems to be a good heuristic for Iper
        # NOTE(review): the comment above says 0.45 but the value is 0.6 —
        # confirm which one is intended.
        self.flow_cutoff = 0.6
        self.min_frames = 5
        # self.datapath = "/export/scratch/compvis/datasets/iPER/processed_256_resized/"
        self.metafilename = 'meta' #"test_codeprep_metadata"
        # NOTE(review): assumes self.datadict already exists (presumably from
        # BaseDataset); also subsample_step is not set here — confirm a
        # default is provided elsewhere before __init__ reads it.
        self.datadict.update({"actor_id": [], "action_id": []})
        # set object weighting always to false
        self.obj_weighting = False
        self.flow_weights = self.config["flow_weights"] if "flow_weights" in self.config else True
        self.use_flow_for_weights = False

    def _make_split(self, data):
        """Return (split_data, train_indices, test_indices).

        Split modes: "videos"/"objects"/"actions"/"actors" hold back 20% of
        the chosen id type; "official" uses the train.txt list of the original
        paper; any other value splits every video 80/20 along time.
        """
        split_data = {"train": {}, "test": {}}
        if self.split == "videos":
            key = "vid"
        elif self.split == "objects":
            key = "object_id"
        elif self.split == "actions":
            key = "action_id"
        elif self.split == "actors":
            key = "actor_id"
        elif self.split == "official":
            # this is the official train test split as in the original paper
            with open(path.join("/".join(self.datapath.split("/")[:-1]), "train.txt"), "r") as f:
                train_names = f.readlines()
            train_indices = np.asarray([], dtype=np.int)
            for n in train_names:
                # sequence names use "/" in train.txt but "_" in the img paths
                n = n.replace("/", "_").rstrip()
                train_indices = np.append(train_indices, np.flatnonzero(np.char.find(data["img_path"], n) != -1))
            train_indices = np.sort(train_indices)
            test_indices = np.flatnonzero(np.logical_not(np.isin(np.arange(data["img_path"].shape[0]), train_indices)))
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
            return split_data, train_indices, test_indices
        else:
            vids = np.unique(self.data["vid"])
            self.logger.info(f"splitting data across_videos")
            train_indices = np.asarray([], dtype=np.int)
            test_indices = np.asarray([], dtype=np.int)
            for vid in vids:
                indices = np.nonzero(data["vid"] == vid)[0]
                # indices = np.arange(len(tdata["img_path"]))
                # np.random.shuffle(indices)
                train_indices = np.append(train_indices, indices[: int(0.8 * indices.shape[0])])
                test_indices = np.append(test_indices, indices[int(0.8 * indices.shape[0]):])
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
            return split_data, train_indices, test_indices
        # split such that some objects are held back for testing
        # (only reached for the "videos"/"objects"/"actions"/"actors" modes;
        # note the `key` in the comprehensions below is the comprehension's
        # own loop variable and shadows the split key selected above)
        self.logger.info(f"Splitting data after {key}")
        ids = np.unique(data[key])
        shuffled_ids = deepcopy(ids)
        np.random.shuffle(shuffled_ids)
        train_ids = shuffled_ids[: int(0.8 * shuffled_ids.shape[0])]
        train_indices = np.flatnonzero(np.isin(data[key], train_ids))
        test_indices = np.flatnonzero(np.logical_not(np.isin(np.arange(self.data["img_path"].shape[0]), train_indices)))
        train_indices = np.sort(train_indices)
        test_indices = np.sort(test_indices)
        split_data["train"] = {
            key: data[key][train_indices] for key in data
        }
        split_data["test"] = {
            key: data[key][test_indices] for key in data
        }
        return split_data, train_indices, test_indices
class Human36mDataset(PlantDataset):
    """Human3.6M video-prediction dataset; supports "official" and "gui" splits."""

    def _set_instance_specific_values(self):
        """Fixed constants for Human3.6M."""
        self.valid_lags = [1]
        self.flow_cutoff = 0.3
        self.min_frames = 5
        self.subsample_step = 2
        # self.datapath = "/export/scratch/compvis/datasets/human3.6M/video_prediction"
        self.metafilename = "meta"
        # NOTE(review): assumes self.datadict already exists (presumably from
        # BaseDataset) — confirm, since PlantDataset.__init__ assigns it later.
        self.datadict.update({"actor_id": [], "action_id": [], "train": []})
        # set object weighting always to false
        self.obj_weighting = False
        self.filter_flow = False
        self.flow_width_factor = 5
        self.flow_weights = True
        self.use_flow_for_weights = True
        self.use_lanczos = True

    def _make_split(self, data):
        """Return (split_data, train_ids, test_ids) for the configured split mode."""
        split_data = {"train": {}, "test": {}}
        if self.split == "official":
            # precomputed boolean 'train' column from the metadata
            train_ids = np.flatnonzero(data["train"])
            test_ids = np.flatnonzero(np.logical_not(data["train"]))
            assert np.intersect1d(train_ids, test_ids).size == 0
            split_data["train"] = {
                key: data[key][train_ids] for key in data
            }
            split_data["test"] = {
                key: data[key][test_ids] for key in data
            }
            return split_data, train_ids, test_ids
        elif self.split == "gui":
            vids = np.unique(self.data["vid"])
            self.logger.info(f"splitting data across_videos")
            train_indices = np.asarray([], dtype=np.int)
            test_indices = np.asarray([], dtype=np.int)
            for vid in vids:
                indices = np.nonzero(data["vid"] == vid)[0]
                # indices = np.arange(len(tdata["img_path"]))
                # np.random.shuffle(indices)
                # first 80% of each video for training, remainder for testing
                train_indices = np.append(train_indices, indices[: int(0.8 * indices.shape[0])])
                test_indices = np.append(test_indices, indices[int(0.8 * indices.shape[0]):])
            split_data["train"] = {
                key: data[key][train_indices] for key in data
            }
            split_data["test"] = {
                key: data[key][test_indices] for key in data
            }
            return split_data, train_indices, test_indices
        else:
            raise ValueError(f'Specified split type "{self.split}" is not valid for Human36mDataset.')
class GoogleImgDataset(Dataset, LoggingParent):
    """Dataset over a flat directory of jpg/jpeg/png images; __getitem__
    yields the raw file path (transforms are stored but applied by callers)."""

    def __init__(self, base_dir, config,):
        Dataset.__init__(self)
        LoggingParent.__init__(self)
        self.logger.info(f"Initialize GoogleImgDataset with basepath {base_dir}")
        self.config = config
        # collect all image files directly inside base_dir
        found = []
        for candidate in glob(path.join(base_dir, "*")):
            if path.isfile(candidate) and candidate.endswith(("jpg", "jpeg", "png")):
                found.append(candidate)
        self.datadict = {"img_path": np.asarray(found)}
        self.transforms = tt.Compose(
            [
                tt.ToTensor(),
                tt.Lambda(lambda x: (x * 2.0) - 1.0),
            ])
        self.logger.info(f"Initialized Dataset with {self.__len__()} images")

    def __getitem__(self, idx):
        return self.datadict["img_path"][idx]

    def __len__(self):
        return self.datadict["img_path"].shape[0]
if __name__ == "__main__":
    # Smoke test: build the configured dataset, sample batches and dump
    # visualizations (mp4 grids in video mode, png grids in image mode)
    # to test_data/<DatasetClassName>.
    import yaml
    import torch
    from torchvision import transforms as tt
    from torch.utils.data import DataLoader, RandomSampler
    import cv2
    from os import makedirs
    from tqdm import tqdm
    from data import get_dataset
    from data.samplers import SequenceSampler, SequenceLengthSampler
    from utils.testing import make_video, make_flow_grid
    from utils.general import get_patches
    # fix all RNG seeds for reproducible sampling
    seed = 42
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)
    np.random.seed(42)
    # random.seed(opt.seed)
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(42)
    rng = np.random.RandomState(42)
    # load config
    fpath = path.dirname(path.realpath(__file__))
    configpath = path.abspath(path.join(fpath, "../config/test_config.yaml"))
    with open(configpath, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # images are mapped to tensors in [-1, 1]
    transforms = tt.Compose(
        [tt.ToTensor(), tt.Lambda(lambda x: (x * 2.0) - 1.0)]
    )
    datakeys = ["images", "img_aT", "img_sT", "app_img_cmp", "app_img_random","flow", "poke"]
    make_overlay = config["general"]["overlay"]
    # generate dataset
    dset, transforms = get_dataset(config["data"],transforms)
    test_dataset = dset(transforms, datakeys, config["data"],train=True)
    save_dir = f"test_data/{test_dataset.__class__.__name__}"
    makedirs(save_dir, exist_ok=True)
    print(test_dataset.datapath)
    if test_dataset.yield_videos:
        # video mode: write sampled sequences out as mp4 grids
        def init_fn(worker_id):
            # re-seed numpy per worker so workers don't yield identical samples
            return np.random.seed(np.random.get_state()[1][0] + worker_id)
        if test_dataset.var_sequence_length:
            sampler = SequenceLengthSampler(test_dataset,shuffle=True,drop_last=False, batch_size=config["training"]["batch_size"],zero_poke=config["data"]["include_zeropoke"])
            loader = DataLoader(test_dataset, batch_sampler=sampler, num_workers=config["data"]["num_workers"], worker_init_fn=init_fn)
        else:
            sampler = RandomSampler(test_dataset)
            loader = DataLoader(test_dataset,batch_size=config["training"]["batch_size"], sampler=sampler,num_workers=config["data"]["num_workers"],
                                worker_init_fn=init_fn, drop_last= True)
        n_logged = config["testing"]["n_logged"]
        for i, batch in enumerate(tqdm(loader)):
            # only visualize the first ~200 batches
            if i >200:
                break
            imgs = batch["images"][:n_logged]
            src_img = imgs[:,0]
            tgt_img = imgs[:,-1]
            flow = batch["flow"][:n_logged]
            # NOTE(review): for flow_weights datasets batch["poke"] collates to a [poke, weights]
            # pair, so "[:n_logged][0]" slices that pair before indexing — confirm whether
            # "[0][:n_logged]" was intended.
            poke = batch["poke"][:n_logged][0] if test_dataset.flow_weights else batch["poke"][:n_logged]
            weights = batch["poke"][:n_logged][1] if test_dataset.flow_weights else None
            postfix = "weighted" if config["data"]["object_weighting"] else "unweighted"
            if weights is not None:
                imgs = get_patches(imgs,weights,config["data"],test_dataset.weight_value_flow)
                postfix = postfix + "_patched"
            out_vid = make_video(imgs[:,0],poke,imgs,imgs,n_logged=min(n_logged,config["training"]["batch_size"]),flow=flow,logwandb=False, flow_weights=weights)
            warping_test = make_flow_grid(src_img,flow,tgt_img,tgt_img,n_logged=min(n_logged,config["training"]["batch_size"]))
            warping_test = cv2.cvtColor(warping_test,cv2.COLOR_RGB2BGR)
            cv2.imwrite(path.join(save_dir,f'warping_test-{i}.png'),warping_test)
            savename = path.join(save_dir,f"vid-grid-{i}-{postfix}.mp4")
            writer = cv2.VideoWriter(
                savename,
                cv2.VideoWriter_fourcc(*"MP4V"),
                5,
                (out_vid.shape[2], out_vid.shape[1]),
            )
            # writer = vio.FFmpegWriter(savename,inputdict=inputdict,outputdict=outputdict)
            for frame in out_vid:
                # frames are RGB; OpenCV's writer expects BGR
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
                writer.write(frame)
            writer.release()
    else:
        # image mode: one comparison grid (source/target/augmented/flow/poke) per batch
        sampler = SequenceSampler(test_dataset, batch_size=config["training"]["batch_size"], shuffle=False, drop_last=False)
        loader = DataLoader(test_dataset, batch_sampler=sampler, num_workers=config["data"]["num_workers"])
        #assert sampler.batch_size == 1
        postfix = "filt" if test_dataset.filter_flow else "nofilt "
        for i, batch in enumerate(tqdm(loader)):
            if i > 200:
                break
            batch = {key: batch[key].squeeze(0) if not isinstance(batch[key],list) else [e.squeeze(0) for e in batch[key]] for key in batch}
            src_img = batch["images"][0]
            tgt_img = batch["images"][-1]
            # vis augmented images
            img_aT = batch["img_aT"][0]
            img_sT = batch["img_sT"]
            img_dis = batch["app_img_random"]
            img_cmp = batch["app_img_cmp"]
            # # vis flow
            flow_map = batch["flow"].permute(1, 2, 0).cpu().numpy()
            flow_map -= flow_map.min()
            flow_map /= flow_map.max()
            flow_map = (flow_map * 255.0).astype(np.uint8)
            # vis poke
            poke = batch["poke"][0].permute(1, 2, 0).cpu().numpy() if test_dataset.flow_weights else batch["poke"].permute(1, 2, 0).cpu().numpy()
            if test_dataset.flow_weights:
                weight_map = batch["poke"][1].cpu().numpy()
                weight_map = ((weight_map - weight_map.min()) / weight_map.max() * 255.).astype(np.uint8)
                heatmap = cv2.applyColorMap(weight_map, cv2.COLORMAP_HOT)
                heatmap = cv2.cvtColor(heatmap, cv2.COLOR_RGB2BGR)
            # visualize poke patch in flow map as white region
            flow_map = np.where((poke**2).sum(-1,keepdims=True)>0, np.full_like(flow_map, 255), flow_map)
            poke -= poke.min()
            poke /= poke.max()
            poke = (poke * 255.0).astype(np.uint8)
            # vis inverted flow
            # flow_map_inv = batch["flow_inv"].permute(1, 2, 0).cpu().numpy()
            # flow_map_inv -= flow_map_inv.min()
            # flow_map_inv /= flow_map_inv.max()
            # flow_map_inv = (flow_map_inv * 255.0).astype(np.uint8)
            # vis images
            src_img = (
                ((src_img.permute(1, 2, 0).cpu() + 1) * 127.5)
                .numpy()
                .astype(np.uint8)
            )
            tgt_img = (
                ((tgt_img.permute(1, 2, 0).cpu() + 1) * 127.5)
                .numpy()
                .astype(np.uint8)
            )
            img_aT = ((img_aT.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            img_sT = ((img_sT.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            img_dis = ((img_dis.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            img_cmp = ((img_cmp.permute(1, 2, 0).cpu() + 1) * 127.5).numpy().astype(np.uint8)
            if make_overlay:
                overlay = cv2.addWeighted(src_img,0.5,tgt_img,0.5,0)
            else:
                tgt_img = [tgt_img,heatmap] if test_dataset.flow_weights else [tgt_img]
            # pad the 2-channel flow/poke maps with a zero third channel for visualization
            zeros = np.expand_dims(np.zeros_like(flow_map).sum(2), axis=2)
            flow_map = np.concatenate([flow_map, zeros], axis=2)
            poke = np.concatenate([poke, zeros], axis=2)
            # flow_map_inv = np.concatenate([flow_map_inv,zeros],axis=2)
            # NOTE(review): in the make_overlay branch tgt_img is still an ndarray, so "*tgt_img"
            # unpacks it row-wise instead of inserting a single image — the list-wrapping above was
            # presumably meant to happen unconditionally; confirm.
            if make_overlay:
                grid = np.concatenate([src_img, *tgt_img,overlay, img_sT, img_aT, img_dis, img_cmp, flow_map, poke], axis=1).astype(np.uint8)
            else:
                grid = np.concatenate([src_img, *tgt_img, img_sT, img_aT, img_dis, img_cmp, flow_map, poke], axis=1).astype(np.uint8)
            grid = cv2.cvtColor(grid,cv2.COLOR_BGR2RGB)
            cv2.imwrite(path.join(save_dir, f"test_grid_{i}-{postfix}.png"), grid)
| 32,596 | 41.947299 | 192 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/helper_functions.py | import cv2
def preprocess_image(img,swap_channels=False):
    """Optionally convert BGR->RGB and crop the image so both sides are multiples of 64.

    :param img: numpy array of shape (H,W,3)
    :param swap_channels: True, if channelorder is BGR
    :return: the (possibly color-converted) image cropped to (H//64*64, W//64*64, 3)
    """
    if swap_channels:
        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
    # this seems to be possible as flownet2 outputs only images which can be divided by 64
    # top-left crop (not a resize) to the largest size divisible by 64
    shape = img.shape
    img = img[:int(shape[0] / 64) * 64,:int(shape[1] / 64) * 64]
return img | 456 | 24.388889 | 90 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/base_dataset.py | from functools import partial
from itertools import chain
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from torchvision import transforms as T
from torchvision.transforms import functional as FT
from PIL import Image
import numpy as np
from abc import abstractmethod
import cv2
from utils.general import convert_flow_2d_to_3d, get_flow_gradients
from data.helper_functions import preprocess_image
from utils.general import LoggingParent
class FlowError(Exception):
    """Signals that a flow file could not be loaded (or another flow-related failure)."""

    def __init__(self, path, msg=None):
        # Fall back to a default message describing the failed load when none is given.
        default = f'Could not load flow file "{path}" neither with "allow_pickle=False" nor with "allow_pickle=True". Considering different sequence....'
        super().__init__(msg if msg is not None else default)
class BaseDataset(Dataset, LoggingParent):
    def __init__(self, transforms, datakeys: list, config: dict, train=True):
        """Set up the state shared by all flow/poke datasets.

        :param transforms: torchvision transforms applied to loaded images
        :param datakeys: keys selecting which outputs __getitem__ assembles (must be non-empty)
        :param config: dataset configuration dict (must contain "spatial_size" and "datapath")
        :param train: whether the dataset is used for training (enables augmentation options)
        """
        Dataset.__init__(self)
        LoggingParent.__init__(self)
        # list of keys for the data that shall be retained
        assert len(datakeys) > 0
        self.datakeys = datakeys
        # torchvision.transforms
        self.transforms = transforms
        # config: contains all relevant configuration parameters
        self.config = config
        self.train = train
        assert "spatial_size" in self.config
        self.datapath = self.config['datapath']
        # self.valid_lags = np.unique(self.config["valid_lags"]) if "valid_lags" in self.config else list(range(6))
        self.yield_videos = self.config["yield_videos"] if "yield_videos" in self.config else False
        # everything, which has to deal with variable sequence lengths
        self.var_sequence_length = self.config["var_sequence_length"] if "var_sequence_length" in self.config and self.yield_videos else False
        self.longest_seq_weight = self.config["longest_seq_weight"] if "longest_seq_weight" in self.config else None
        self.scale_poke_to_res = self.config["scale_poke_to_res"] if "scale_poke_to_res" in self.config else False
        if self.scale_poke_to_res:
            self.logger.info(f'Scaling flows and pokes to dataset resolution, which is {self.config["spatial_size"]}')
        self.logger.info(f'Dataset is yielding {"videos" if self.yield_videos else "images"}.')
        # default poke patch size scales linearly with the spatial resolution
        self.poke_size = self.config["poke_size"] if "poke_size" in self.config else self.config["spatial_size"][0] / 128 * 10
        if "poke" in self.datakeys:
            self.logger.info(f"Poke size is {self.poke_size}.")
        # for flow filtering: default values are such that nothing changes
        self.filter_flow = False
        self.flow_width_factor = None
        # whether fancy appearance augmentation shall be used or not
        self.fancy_aug = self.config["fancy_aug"] if "fancy_aug" in self.config else False
        # flow weighting, if intended to be enabled
        self.flow_weights = self.config["flow_weights"] if "flow_weights" in self.config else False
        self.weight_value_flow = self.config["foreground_value"] if "foreground_value" in self.config else 1.
        self.weight_value_poke = self.config["poke_value"] if "poke_value" in self.config else 1.
        self.weight_value_bg = self.config["background_weight"] if "background_weight" in self.config else 1.
        # whether to use only one value in for poke or the complete flow field within that patch
        self.equal_poke_val = self.config["equal_poke_val"] if "equal_poke_val" in self.config else True
        # Whether or not to normalize the flow values
        self.normalize_flows = self.config["normalize_flows"] if "normalize_flows" in self.config else False
        # Whether to weight different objects (i.e. samples with different object_ids) the way that the should be yield equally often (recommended for imbalanced datasets)
        self.obj_weighting = self.config["object_weighting"] if "object_weighting" in self.config else False
        # color / geometry augmentation probabilities and magnitudes
        self.p_col= self.config["p_col"] if "p_col" in self.config else 0
        self.p_geom = self.config["p_geom"] if "p_geom" in self.config else 0
        self.ab = self.config["augment_b"] if "augment_b" in self.config else 0
        self.ac = self.config["augment_c"] if "augment_c" in self.config else 0
        self.ah = self.config["augment_h"] if "augment_h" in self.config else 0
        self.a_s = self.config["augment_s"] if "augment_s" in self.config else 0
        self.ad = self.config["aug_deg"] if "aug_deg" in self.config else 0
        self.at = self.config["aug_trans"] if "aug_trans" in self.config else (0,0)
        self.use_lanczos = self.config["use_lanczos"] if "use_lanczos" in self.config else False
        self.pre_T = T.ToPILImage()
        self.z1_normalize = "01_normalize" in self.config and self.config["01_normalize"]
        # image post-transform: tensors in [0, 1] when z1_normalize, else in [-1, 1]
        if self.z1_normalize:
            self.post_T = T.Compose([T.ToTensor(),])
        else:
            self.post_T = T.Compose([T.ToTensor(),T.Lambda(lambda x: (x * 2.0) - 1.0)])
        self.post_edges = T.Compose([T.ToTensor()])
        # key:value mappings for every datakey in self.datakeys
        # (each entry: [loader fn] or [loader fn, list of transform groups])
        self._output_dict = {
            "images": [partial(self._get_imgs)],
            "poke": [self._get_poke],
            "flow": [self._get_flow],
            "img_aT": [partial(self._get_imgs,use_fb_aug = self.fancy_aug), ["color"]],
            "img_sT": [partial(self._get_imgs,sample=True),["geometry"]],
            "app_img_random": [self._get_transfer_img],
            "app_img_dis": [partial(self._get_imgs, sample=True), ["color", "geometry"]],
            "app_img_cmp": [self._get_transfer_img],
            "flow_3D": [self._get_3d_flow],
            "poke_3D": [self._get_3d_poke],
            "edge_image": [self._get_edge_image],
            "edge_flow": [self._get_edge_flow],
            "flow_3D_series": [self._get_flow_series],
            "image_series": [self._get_image_series]
        }
        if self.fancy_aug:
            assert "app_img_dis" not in self.datakeys
        # the data that's held by the dataset
        self.datadict = {
            "img_path": [],
            "flow_paths": [],
            "img_size": [],
            "flow_size": [],
            "vid": [],
            "fid": [],
            "object_id": [],
            # "original_id": [],
            "flow_range": []
        }
        self.max_frames = self.config["max_frames"] if "max_frames" in self.config else 1
        self.augment = self.config["augment_wo_dis"] if ("augment_wo_dis" in self.config and self.train) else False
        self.color_transfs = None
        self.geom_transfs = None
        self.subsample_step = 1
        self.min_frames = None
        # sequence start and end ids are related to the entire dataset and so is self.img_paths
        self.eids_per_seq = {}
        self.sids_per_seq = {}
        self.seq_len_T_chunk = {}
        self.max_trials_flow_load = 50
        #self.img_paths = {}
        self.mask=None
        self.flow_norms = None
        self.flow_in_ram = False
        self.imgs_in_ram = False
        self.outside_length = None
        self.loaded_flows = []
        self.loaded_imgs = []
        self.valid_lags = None
        self.ids_per_seq_len = {}
        self.object_weights_per_seq_len = {}
        if "weight_zeropoke" in self.config and "include_zeropoke" in self.config:
            self.zeropoke_weight = max(1.,float(self.max_frames) / 5) if self.config["weight_zeropoke"] and self.config["include_zeropoke"] else 1.
        else:
            self.zeropoke_weight = 1.
        # this is the value, which will be the upper bound for all normalized optical flows, when training on variable sequence lengths
        # per default, set to 1 here (max) can be adapted, if necessary, in the subclass of base dataset
        self.flow_cutoff = 1.
        # valid poke centers: keep half a poke patch away from the image border
        self.valid_h = [self.poke_size, self.config["spatial_size"][0] - self.poke_size]
        self.valid_w = [self.poke_size, self.config["spatial_size"][1] - self.poke_size]
        self.use_flow_for_weights = False
    def __getitem__(self, idx):
        """
        :param idx: The idx is here a tuple, consisting of the actual id and the sampled lag for the flow in the respective iteration
        :return: dict mapping each requested datakey to its loaded tensor(s)
        """
        # collect outputs
        data = {}
        transforms = {"color": self._get_color_transforms(), "geometry" : self._get_geometric_transforms()}
        # per-sample augmentation transforms, shared across all datakeys of this sample
        self.color_transfs = self._get_color_transforms() if self.augment else None
        self.geom_transfs = self._get_geometric_transforms() if self.augment else None
        # sample id (in case, sample is enabled)
        if self.var_sequence_length:
            idx = self._get_valid_ids(*idx)
        else:
            idx = self._get_valid_ids(length=None,index=idx)
        # sidx: another frame of the same video; tr_vid: a different video for appearance transfer
        sidx = int(np.random.choice(np.flatnonzero(self.datadict["vid"] == self.datadict["vid"][idx[0]]), 1))
        tr_vid = int(np.random.choice(self.datadict["vid"][self.datadict["vid"] != self.datadict["vid"][idx[0]]], 1))
        # retry loop: corrupt flow files raise FlowError, in which case a new id is drawn
        for i in range(self.max_trials_flow_load):
            self.mask = {}
            try:
                self._get_mask(idx)
                data = {key: self._output_dict[key][0](idx, sample_idx = sidx,
                                                       transforms = chain.from_iterable([transforms[tkey] for tkey in self._output_dict[key][1]]) if len(self._output_dict[key])>1 else None,
                                                       transfer_vid= tr_vid) for key in self.datakeys}
                break
            except FlowError as fe:
                self.logger.error(fe)
                # sample new id and try again
                img_id = int(np.random.choice(np.arange(self.datadict["img_path"].shape[0]),1))
                # don't change lag
                idx = (img_id,idx[1])
        if len(data) == 0:
            raise IOError(f"Errors in flow files loading...tried it {self.max_trials_flow_load} times consecutively without success.")
        return data
    def _get_valid_ids(self,length,index = None):
        """Map a raw (length, index) request to a valid (start_id, length) pair.

        :param length: The sequence length (or flow step, depending on whether var_sequence_length
            is True or False); -1 denotes the zero-poke case
        :param index: The sample id (or, when length == -1 with variable sequence lengths,
            the sampled length for the regarded batch)
        :return: tuple (start_id, length)
        """
        # we need to do the following things:
        # take care, that choose one start id from all samples, which have the appropriate flow_magnitude and result in sequences which are within the same video
        if self.var_sequence_length:
            #ids = np.flatnonzero(np.logical_and(self.datadict["flow_range"][:,1]>self.seq_len_T_chunk[length],np.less_equal(np.arange(self.datadict["img_path"].shape[0]) + self.min_seq_length[0] + length*self.subsample_step,self.datadict["seq_end_id"])))
            if length == -1:
                # use maximum sequence length for such cases
                # length = int(np.random.choice(np.arange(self.max_frames),1))
                # in case length == -1: index corresponds to actual sampled length for the regarded batch
                self.outside_length = index
                start_id = int(np.random.choice(self.ids_per_seq_len[self.outside_length], 1))
            else:
                ids = self.ids_per_seq_len[length]
                if self.obj_weighting:
                    start_id = int(np.random.choice(ids, 1, p=self.object_weights_per_seq_len[length]))
                else:
                    start_id = int(np.random.choice(ids, 1))
        else:
            if index == -1:
                length = -1
                # NOTE(review): both branches below pass p=self.datadict["weights"]; the
                # non-obj_weighting branch presumably should sample uniformly — confirm.
                if self.obj_weighting:
                    index = int(np.random.choice(np.arange(self.datadict["object_id"].shape[0]),p=self.datadict["weights"],size=1))
                else:
                    index = int(np.random.choice(np.arange(self.datadict["object_id"].shape[0]), p=self.datadict["weights"], size=1))
            # clamp the start id so the sequence stays inside its video and valid flow range
            max_id_fid = self.sids_per_seq[self.datadict["vid"][index]] + self.datadict["max_fid"][index,self.valid_lags[0]] - 1
            start_id = min(min(index,self.datadict["seq_end_id"][index]-(self.max_frames* self.subsample_step) - 1),max_id_fid)
        return (start_id,length)
def _get_3d_flow(self, ids, **kwargs):
flow = self._get_flow(ids)
flow = convert_flow_2d_to_3d(flow)
return flow
def _get_3d_poke(self, ids, **kwargs):
flow = self._get_poke(ids)
flow = convert_flow_2d_to_3d(flow)
return flow
    def _get_edge_image(self, ids, sample_idx, transforms=None, sample=False, use_fb_aug=False, **kwargs):
        """Load the frame(s) for *ids* (or just *sample_idx* when sample=True) and return
        their horizontal and vertical Sobel edge maps, stacked along dim 0."""
        imgs = []
        if sample:
            yield_ids = [sample_idx]
        else:
            yield_ids = self._get_yield_ids(ids)
        for i,idx in enumerate(yield_ids):
            img_path = self.datadict["img_path"][idx]
            img = cv2.imread(img_path)
            # image is read in BGR
            img = preprocess_image(img, swap_channels=True)
            img = cv2.resize(
                img, self.config["spatial_size"], cv2.INTER_LINEAR
            )
            # transformations
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # x- then y-gradient of the [0,1]-scaled grayscale image; each appended separately
            gradient = cv2.Sobel(img/255, cv2.CV_64F, 1, 0, ksize=3)
            gradient = self.post_edges(gradient)[0]
            imgs.append(gradient)
            gradient = cv2.Sobel(img/255, cv2.CV_64F, 0, 1, ksize=3)
            gradient = self.post_edges(gradient)[0]
            imgs.append(gradient)
        return torch.stack(imgs, dim=0).squeeze(dim=0)
    def _get_edge_flow(self, ids, **kwargs):
        """Load the flow file for *ids*, resize it, and return its first- and
        second-order Sobel gradients stacked along dim 0.

        Raises FlowError when the flow file cannot be loaded.
        """
        flow_path = self.datadict["flow_paths"][ids[0], self.valid_lags[0]]
        # debug, this path seems to be erroneous
        # flow_path = "/export/data/ablattma/Datasets/plants/processed_crops/VID_0_3_1024x1024/prediction_3_28.flow.npy"
        try:
            flow = np.load(flow_path)
        except ValueError:
            # some flow files are stored pickled; retry once before giving up
            try:
                flow = np.load(flow_path,allow_pickle=True)
            except Exception as ex:
                print(ex)
                raise FlowError(flow_path)
        except:
            raise FlowError(flow_path)
        dsize = None
        if "spatial_size" in self.config:
            dsize = self.config["spatial_size"]
        elif "resize_factor" in self.config:
            dsize = (
                int(float(flow.shape[1]) / self.config["resize_factor"]),
                int(float(flow.shape[2]) / self.config["resize_factor"]),
            )
        flow = F.interpolate(
            torch.from_numpy(flow).unsqueeze(0), size=dsize, mode="nearest"
        ).squeeze(0)
        if self.config["predict_3D"]:
            flow = convert_flow_2d_to_3d(flow)
        gradient_d1_x, gradient_d1_y, gradient_d2_x, gradient_d2_y = get_flow_gradients(flow)
        all_gradients = [gradient_d1_x,
                         gradient_d1_y,
                         gradient_d2_x,
                         gradient_d2_y]
        return torch.stack(all_gradients, dim=0).squeeze(dim=0)
    def _get_transfer_img(self, ids, transfer_vid,**kwargs):
        """Sample one random frame from video *transfer_vid* (a different video)
        for appearance transfer, resized and normalized like regular frames."""
        imgs=[]
        yield_ids = [int(np.random.choice(np.flatnonzero(self.datadict["vid"] == transfer_vid), 1))]
        for idx in yield_ids:
            img_path = self.datadict["img_path"][idx]
            img = cv2.imread(img_path)
            # image is read in BGR
            img = preprocess_image(img, swap_channels=True)
            if "spatial_size" in self.config:
                img = cv2.resize(
                    img, self.config["spatial_size"], cv2.INTER_LINEAR
                )
            elif "resize_factor" in self.config:
                dsize = (
                    int(float(img.shape[1]) / self.config["resize_factor"]),
                    int(float(img.shape[0]) / self.config["resize_factor"]),
                )
                img = cv2.resize(img, dsize, interpolation=cv2.INTER_LINEAR)
            # transformations
            img = self.pre_T(img)
            img = self.post_T(img)
            imgs.append(img)
        return torch.stack(imgs, dim=0).squeeze(dim=0)
    def _compute_mask(self,target_id):
        """Compute a foreground mask for frame *target_id* via GrabCut; returns the
        raw GrabCut label mask (values in {0,1,2,3})."""
        img = self._get_imgs([], sample_idx=target_id, sample=True)
        # undo tensor normalization back to uint8 HxWx3 for OpenCV
        if self.z1_normalize:
            img = (img.permute(1, 2, 0).numpy() * 255.).astype(np.uint8)
        else:
            img = ((img.permute(1, 2, 0).numpy() + 1.) * 127.5).astype(np.uint8)
        mask = np.zeros(img.shape[:2], np.uint8)
        # rect defines starting background area
        rect = (int(img.shape[1] / self.flow_width_factor), int(self.valid_h[0]), int((self.flow_width_factor - 2) / self.flow_width_factor * img.shape[1]), int(self.valid_h[1] - self.valid_h[0]))
        # initialize background and foreground models
        fgm = np.zeros((1, 65), dtype=np.float64)
        bgm = np.zeros((1, 65), dtype=np.float64)
        # apply grab cut algorithm
        mask2, fgm, bgm = cv2.grabCut(img, mask, rect, fgm, bgm, 5, cv2.GC_INIT_WITH_RECT)
        return mask2
def _compute_mask_with_flow(self,target_id):
flow = self._get_flow([target_id])
amplitude = torch.norm(flow, 2, dim=0)
amplitude -= amplitude.min()
amplitude /= amplitude.max()
# use only such regions where the amplitude is larger than mean + 1 * std
mask = torch.where(torch.gt(amplitude,amplitude.mean()+amplitude.std()),torch.ones_like(amplitude),torch.zeros_like(amplitude)).numpy().astype(np.bool)
return mask
    def _get_mask(self,ids):
        """Populate self.mask with foreground masks for the start (and, when flow
        weighting is on, target and mid) frames of the sequence."""
        # masks are only needed for flow filtering, fancy augmentation or video flow-weighting
        if self.filter_flow or self.fancy_aug or (self.flow_weights and self.yield_videos):
            if self.use_flow_for_weights:
                mask_src = self._compute_mask_with_flow(ids[0])
                self.mask.update({"img_start": mask_src})
            else:
                # GrabCut labels: 0/2 = (probable) background, 1/3 = (probable) foreground
                mask_src = self._compute_mask(ids[0])
                self.mask.update({"img_start" : np.where((mask_src == 2) | (mask_src == 0), 0, 1).astype(np.bool)})
        if self.flow_weights:
            yield_ids = self._get_yield_ids(ids)
            tgt_id = yield_ids[-1]
            if self.use_flow_for_weights:
                mask_tgt = self._compute_mask_with_flow(tgt_id)
                self.mask.update({"img_tgt": mask_tgt})
            else:
                mask_tgt = self._compute_mask(tgt_id)
                self.mask.update({"img_tgt": np.where((mask_tgt == 2) | (mask_tgt == 0), 0, 1).astype(np.bool)})
            if self.yield_videos:
                # also mask the middle frame of the sequence
                mid_id = int((len(list(yield_ids))+yield_ids[0]) / 2)
                if self.use_flow_for_weights:
                    mask_mid = self._compute_mask_with_flow(mid_id)
                    self.mask.update({"img_mid": mask_mid})
                else:
                    mask_mid = self._compute_mask(mid_id)
                    self.mask.update({"img_mid": np.where((mask_mid == 2) | (mask_mid == 0), 0, 1).astype(np.bool)})
    def _get_yield_ids(self,ids):
        """Translate (start_id, length) into the concrete frame ids to load.

        For zero-poke samples (length == -1) the start frame is repeated so the
        target sequence is static; otherwise a range spaced by self.subsample_step
        is produced. In image mode a (source, target) pair is returned instead.
        """
        start_id = ids[0]
        if self.yield_videos:
            if ids[-1] == -1:
                # zero poke: repeat the start frame
                if self.var_sequence_length:
                    n_frames = self.min_frames + self.outside_length
                    yield_ids = np.stack([start_id]* n_frames,axis=0).tolist()
                else:
                    yield_ids = np.stack([start_id]* (self.max_frames+1),axis=0).tolist()
            else:
                yield_ids = range(start_id, start_id + (self.min_frames + ids[-1]) * self.subsample_step + 1 ,self.subsample_step) \
                    if self.var_sequence_length else range(start_id, start_id + self.max_frames * self.subsample_step + 1, self.subsample_step)
        else:
            # image mode: source frame plus target frame at a fixed lag
            yield_ids = (start_id, start_id + (self.valid_lags[0] + 1) * 5)
        return yield_ids
def _get_image_series(self, ids, step_width=10, **kwargs):
all_imgs = []
for i in range(1, step_width+1):
new_ids = (ids[0] + i * (1 + self.valid_lags[0]) * 5, ids[1])
flow = self._get_imgs(new_ids, None)
all_imgs.append(flow)
return torch.from_numpy(np.stack(all_imgs, axis=0))
    # grabs a series of images
    def _get_imgs(self, ids, sample_idx, transforms=None, sample=False, use_fb_aug=False, **kwargs):
        """Load, resize, augment and normalize one frame (sample=True) or the whole
        frame sequence for *ids*; returns a stacked tensor (squeezed for a single frame)."""
        imgs = []
        if sample:
            yield_ids = [sample_idx]
        else:
            # avoid generating the entire sequence for the color transformed image
            # NOTE(review): this membership test consumes elements of the `transforms`
            # chain iterator and compares against the bound method itself — verify that
            # this branch behaves as intended.
            if transforms is not None and self._get_color_transforms in transforms and not sample:
                yield_ids = [ids[0]]
            else:
                yield_ids = self._get_yield_ids(ids)
        for i,idx in enumerate(yield_ids):
            # fancy background augmentation applies only to the first and last frame
            faug = use_fb_aug and (i == 0 or i == len(yield_ids) - 1)
            if self.imgs_in_ram:
                img = self.loaded_imgs[idx]
            else:
                img_path = self.datadict["img_path"][idx]
                img = cv2.imread(img_path)
                img = preprocess_image(img, swap_channels=True)
                # image is read in BGR
                if self.use_lanczos and self.config["spatial_size"] == 64:
                    img = np.array(Image.fromarray(img).resize(self.config["spatial_size"], resample=Image.LANCZOS))
                else:
                    img = cv2.resize(
                        img, self.config["spatial_size"], cv2.INTER_LINEAR
                    )
            # transformations
            img = self.pre_T(img)
            if transforms is not None:
                for t in transforms:
                    img = t(img)
                if faug:
                    # build a separately color-augmented copy used as background
                    bts = self._get_color_transforms()
                    img_back = img
                    for bt in bts:
                        img_back = bt(img_back)
                    img_back = self.post_T(img_back)
            else:
                if self.color_transfs is not None:
                    for t in self.color_transfs:
                        img = t(img)
                if self.geom_transfs is not None:
                    for t in self.geom_transfs:
                        img = t(img)
            img = self.post_T(img)
            if faug:
                # composite: foreground from img, background from the re-augmented copy
                img = torch.where(torch.from_numpy(self.mask["img_start"]).unsqueeze(0),img,img_back)
            imgs.append(img)
        return torch.stack(imgs, dim=0).squeeze(dim=0)
    # extracts pokes as flow patches
    def _get_poke(self, ids, **kwargs):
        """Sample 1..n_pokes local flow patches ("pokes") from the flow field of *ids*.

        Returns the stacked poke tensor; additionally returns per-pixel weights when
        self.flow_weights is set, and the poke targets when "yield_poke_target" is passed.
        Raises FlowError when no valid poke locations remain.
        """
        seq_len_idx = ids[-1]
        if seq_len_idx == -1:
            # make fake ids to avoid returning zero flow for poke sampling
            fake_ids = (ids[0],10)
            flow = self._get_flow(fake_ids)
        else:
            flow = self._get_flow(ids)
        # compute amplitude
        amplitude = torch.norm(flow[:, self.valid_h[0]:self.valid_h[1], self.valid_w[0]:self.valid_w[1]], 2, dim=0)
        amplitude -= amplitude.min()
        amplitude /= amplitude.max()
        if seq_len_idx == -1:
            # use only very small poke values, this should indicate background values
            amplitude_filt = amplitude
            if self.filter_flow:
                # only consider the part of the mask which corresponds to the region considered in flow
                #amplitude_filt = torch.from_numpy(np.where(self.mask["img_start"][self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]], amplitude, np.zeros_like(amplitude)))
                indices_pre = np.nonzero(np.logical_not(self.mask["img_start"][self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]]))
                indices = torch.from_numpy(np.stack(indices_pre,axis=-1))
                if indices.shape[0] == 0:
                    indices = torch.lt(amplitude, np.percentile(amplitude.numpy(), 5)).nonzero(as_tuple=False)
            else:
                indices = torch.lt(amplitude, np.percentile(amplitude.numpy(), 5)).nonzero(as_tuple=False)
            #amplitude_filt = amplitude
            std = amplitude_filt.std()
            mean = torch.mean(amplitude_filt)
            # indices_mgn: high-magnitude locations used as poke targets for zero-poke samples
            indices_mgn = torch.gt(amplitude_filt, mean + (std)).nonzero(as_tuple=False)
            if indices_mgn.shape[0] == 0:
                # if flow is not entirely equally distributed, there should be at least 1 value which is above the mean
                # self.logger.warn("Fallback in Dataloading bacause no values remain after filtering.")
                indices_mgn = torch.gt(amplitude_filt, mean).nonzero(as_tuple=False)
            indices_mgn = indices_mgn + np.asarray([[self.valid_h[0], self.valid_w[0]]], dtype=np.int)
            indices_mgn = (indices_mgn[:, 0], indices_mgn[:, 1])
        else:
            if self.filter_flow:
                # only consider the part of the mask which corresponds to the region considered in flow
                amplitude_filt = torch.from_numpy(np.where(self.mask["img_start"][self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]], amplitude, np.zeros_like(amplitude)))
            else:
                amplitude_filt = amplitude
            std = amplitude_filt.std()
            mean = torch.mean(amplitude_filt)
            if self.var_sequence_length:
                # restrict to the magnitude chunk associated with this sequence length
                amplitude_filt = torch.where(torch.from_numpy(np.logical_and((amplitude_filt > self.seq_len_T_chunk[ids[-1]]).numpy(),(amplitude_filt<self.seq_len_T_chunk[ids[-1]+1]).numpy())),
                                             amplitude_filt, torch.zeros_like(amplitude_filt))
            # compute valid indices by thresholding
            indices = torch.gt(amplitude_filt, mean + (std * 2.0)).nonzero(as_tuple=False)
            if indices.shape[0] == 0:
                indices = torch.gt(amplitude, mean + std).nonzero(as_tuple=False)
                if indices.shape[0] == 0:
                    # if flow is not entirely equally distributed, there should be at least 1 value which is above the mean
                    #self.logger.warn("Fallback in Dataloading bacause no values remain after filtering.")
                    indices = torch.gt(amplitude, mean).nonzero(as_tuple=False)
        # shift indices from the cropped region back to full-image coordinates
        indices = indices + np.asarray([[self.valid_h[0], self.valid_w[0]]], dtype=np.int)
        # check if indices is not empty, if so, sample another frame (error is catched in __getitem__())
        if indices.shape[0] == 0:
            raise FlowError(path=[],msg=f"Empty indices array at index {ids[0]}....")
        # shift ids to match size of real flow patch
        indices = (indices[:, 0], indices[:, 1])
        # generate number of pokes
        n_pokes = int(
            np.random.randint(
                1, min(self.config["n_pokes"], int(indices[0].shape[0])) + 1
            )
        )
        if seq_len_idx == -1:
            ids_mgn = np.random.randint(indices_mgn[0].shape[0], size=n_pokes)
            row_ids_mgn = indices_mgn[0][ids_mgn]
            col_ids_mgn = indices_mgn[1][ids_mgn]
        # and generate the actual pokes
        ids = np.random.randint(indices[0].shape[0], size=n_pokes)
        row_ids = indices[0][ids]
        col_ids = indices[1][ids]
        pokes = []
        half_poke_size = int(self.poke_size / 2)
        zeros = torch.zeros_like(flow)
        poke_targets = []
        for n,ids in enumerate(zip(row_ids, col_ids)):
            # NOTE(review): `poke` aliases the shared `zeros` tensor, so with n_pokes > 1 every
            # entry of `pokes` references the same storage and accumulates all patches —
            # confirm whether `zeros.clone()` was intended here.
            poke = zeros
            if seq_len_idx == -1:
                poke_target =flow[:,row_ids_mgn[n],col_ids_mgn[n]].unsqueeze(-1).unsqueeze(-1) if self.equal_poke_val else \
                    flow[:,row_ids_mgn[n] - half_poke_size:row_ids_mgn[n] + half_poke_size +1,
                         col_ids_mgn[n] - half_poke_size:col_ids_mgn[n] + half_poke_size +1]
            else:
                poke_target = flow[:,ids[0],ids[1]].unsqueeze(-1).unsqueeze(-1) if self.equal_poke_val else flow[:,
                              ids[0] - half_poke_size : ids[0] + half_poke_size + 1,
                              ids[1] - half_poke_size : ids[1] + half_poke_size + 1,]
            poke[
                :,
                ids[0] - half_poke_size: ids[0] + half_poke_size + 1,
                ids[1] - half_poke_size: ids[1] + half_poke_size + 1,
            ] = poke_target
            pokes.append(poke)
            loc_and_poke = (ids,poke_target)
            poke_targets.append(loc_and_poke)
        # unsqueeze in case of num_pokes = 1
        if self.flow_weights:
            if self.yield_videos:
                if seq_len_idx == -1:
                    complete_mask = np.ones(self.config["spatial_size"], dtype=np.bool)
                else:
                    # union of the start/mid/target foreground masks
                    complete_mask = np.logical_or(np.logical_or(self.mask["img_tgt"],self.mask["img_start"]), self.mask["img_mid"])
                mask_ids = np.nonzero(complete_mask)
                try:
                    # bounding box of the union mask defines the foreground-weight region
                    min_h = mask_ids[0].min()
                    max_h = mask_ids[0].max()
                    min_w = mask_ids[1].min()
                    max_w = mask_ids[1].max()
                    weights = np.full(self.mask["img_start"].shape,self.weight_value_bg)
                    weights[min_h:max_h,min_w:max_w] = self.weight_value_flow
                except Exception as e:
                    self.logger.warn(f'Catch exception in "dataset._get_poke()": {e.__class__.__name__}: "{e}". Using full image instead of patch....')
                    weights = np.full(self.mask["img_start"].shape,self.weight_value_bg)
                    weights[self.valid_h[0]:self.valid_h[1],self.valid_w[0]:self.valid_w[1]] = self.weight_value_flow
                #weights = np.where(complete_mask,np.full_like(complete_mask,self.weight_value_flow,dtype=np.float),np.full_like(complete_mask,self.weight_value_bg,dtype=np.float),)
            else:
                weights = np.where(self.mask["img_tgt"],np.full_like(self.mask["img_tgt"],self.weight_value_flow,dtype=np.float),np.full_like(self.mask["img_tgt"],self.weight_value_bg,dtype=np.float),)
            # poke regions get higher weights
            # for poke in pokes:
            #     weights = np.where(((poke**2).sum(0)>0),np.full_like(weights,self.weight_value_poke),weights)
            weights = torch.from_numpy(weights)
            pokes = torch.stack(pokes, dim=0).squeeze(0)
            if "yield_poke_target" in kwargs:
                return pokes, weights, poke_targets
            return pokes, weights
        else:
            pokes = torch.stack(pokes, dim=0).squeeze(0)
            if "yield_poke_target" in kwargs:
                return pokes, poke_targets
            return pokes
def _get_flow_series(self, ids, step_width=10, **kwargs):
all_flows = []
for i in range(1, step_width+1):
new_ids = (ids[0] + i * (1 + self.valid_lags[0]) * 5, self.valid_lags[0], ids[1])
flow = self._get_3d_flow(new_ids)
all_flows.append(flow)
return torch.from_numpy(np.stack(all_flows, axis=0))
    # extracts entire flow
    def _get_flow(self, ids, **kwargs):
        """Load the optical flow for ids[0] (from RAM or disk), normalize/scale it and,
        when loading from disk, resize it to spatial_size.

        Raises FlowError when the flow file on disk cannot be loaded.
        """
        if self.flow_in_ram:
            flow = torch.from_numpy(self.loaded_flows[ids[0]])
        else:
            flow_path = self.datadict["flow_paths"][ids[0], self.valid_lags[0]]
            # debug, this path seems to be erroneous
            # flow_path = "/export/data/ablattma/Datasets/plants/processed_crops/VID_0_3_1024x1024/prediction_3_28.flow.npy"
            try:
                flow = np.load(flow_path)
            except ValueError:
                # some flow files are stored pickled; retry once before giving up
                try:
                    flow = np.load(flow_path,allow_pickle=True)
                except Exception as ex:
                    print(ex)
                    raise FlowError(flow_path)
            except:
                raise FlowError(flow_path)
            if self.normalize_flows:
                flow = flow / self.flow_norms["max_norm"][self.valid_lags[0]]
            elif not self.normalize_flows and self.scale_poke_to_res:
                # scaling of poke magnitudes to current resolution
                flow = flow / (flow.shape[1]/self.config["spatial_size"][0])
            dsize = self.config["spatial_size"]
            flow = F.interpolate(
                torch.from_numpy(flow).unsqueeze(0), size=dsize, mode="bilinear",align_corners=True
            ).squeeze(0)
        if ids[-1] == -1:
            # zero-poke sample: target motion is zero
            flow = torch.zeros_like(flow)
        if self.geom_transfs is not None:
            # apply this sample's geometric augmentation channel-wise via PIL float images
            c1 = Image.fromarray(flow[0].numpy(),mode="F")
            c2 = Image.fromarray(flow[1].numpy(),mode="F")
            for tr in self.geom_transfs:
                c1 = tr(c1)
                c2 = tr(c2)
            flow = torch.from_numpy(np.stack([np.array(c1.getdata()).reshape(c1.size[0],c1.size[1]),
                                              np.array(c2.getdata()).reshape(c2.size[0],c2.size[1])],axis=0)).to(torch.float)
        return flow
    def _get_color_transforms(self):
        """Sample one coherent set of color-jitter transforms
        (brightness, contrast, hue, saturation), applied with probability self.p_col."""
        # to make sure, the transformations are always coherent within the same sample
        make_trans = bool(np.random.choice(np.arange(2), size=1, p=[1 - self.p_col ,self.p_col]))
        brightness_val = float(np.random.uniform(-self.ab,self.ab,1)) if self.ab > 0. and make_trans else 0.
        contrast_val = float(np.random.uniform(-self.ac, self.ac, 1)) if self.ac > 0. and make_trans else 0.
        # NOTE(review): the hue range is asymmetric ([-ah, 2*ah)) unlike the other channels —
        # confirm this is intended.
        hue_val = float(np.random.uniform(-self.ah, 2 * self.ah, 1)) if self.ah > 0. and make_trans else 0.
        saturation_val = 1. + (float(np.random.uniform(-self.a_s,self.a_s)) if self.a_s > 0. and make_trans else 0)
        b_T = partial(FT.adjust_brightness,brightness_factor=1. + brightness_val)
        c_T = partial(FT.adjust_contrast,contrast_factor=1. + contrast_val)
        h_T = partial(FT.adjust_hue, hue_factor=hue_val)
        s_T = partial(FT.adjust_saturation,saturation_factor =saturation_val)
        return [b_T,c_T,h_T,s_T]
def _get_geometric_transforms(self):
# to make sure, the transformations are always coherent within the same sample
make_trans = bool(np.random.choice(np.arange(2),size=1,p=[1-self.p_geom,self.p_geom]))
rval = float(np.random.uniform(-self.ad,self.ad,1)) if self.ad > 0. and make_trans else 0.
tval_vert = int(np.random.randint(int(-self.at[0] * self.config["spatial_size"][1] / 2), int(self.at[0] * self.config["spatial_size"][1] / 2), 1)) if self.at[0] > 0 and make_trans else 0
tval_hor = int(np.random.randint(int(-self.at[1] * self.config["spatial_size"][0] / 2), int(self.at[1] * self.config["spatial_size"][0] / 2), 1)) if self.at[1] > 0 and make_trans else 0
a_T = partial(FT.affine,angle=rval,translate=(tval_hor,tval_vert),scale=1.0,shear=0)
p = partial(FT.pad,padding=(int(self.config["spatial_size"][0] / 2), int(self.config["spatial_size"][1] / 2)),padding_mode="reflect")
c = partial(FT.center_crop,output_size=self.config["spatial_size"])
return [p,a_T,c]
def _get_flip_transform(self):
flip = bool(np.random.choice([True,False],size=1))
if flip:
return FT.vflip
else:
return None
    @abstractmethod
    def __len__(self):
        """Return the number of samples; also runs generic consistency checks.

        __len__ is called at least once before dataloading starts, so the
        cross-attribute invariants shared by all subclasses are validated here.
        """
        assert self.valid_lags is not None
        assert self.min_frames is not None
        if self.filter_flow:
            assert self.flow_width_factor is not None, f"If the dataset shall be filtered, the flow width factor has to be set in the constructor of the respective child class of BaseDataset"
            assert isinstance(self.flow_width_factor,int)
        if self.flow_weights:
            assert self.flow_width_factor is not None
        if self.normalize_flows:
            assert self.flow_norms is not None
        # In-RAM caches must cover the full metadata index.
        if self.flow_in_ram:
            assert len(self.loaded_flows) == self.datadict["flow_paths"].shape[0]
        if self.imgs_in_ram:
            assert len(self.loaded_imgs) == self.datadict["img_path"].shape[0]
        # Variable sequence lengths require video mode, normalized flows and
        # the per-length index/weight structures to be populated.
        if self.var_sequence_length:
            assert self.normalize_flows
            assert self.yield_videos
            assert len(self.ids_per_seq_len) > 0
            assert len(self.object_weights_per_seq_len) == len(self.ids_per_seq_len)
        return self.datadict["flow_paths"].shape[0] if isinstance(self.datadict["flow_paths"],np.ndarray) else len(self.datadict["flow_paths"])
    @abstractmethod
    def _set_instance_specific_values(self):
        """Set dataset-specific attributes in subclasses (paths, lags, filters, ...)."""
        pass
    @abstractmethod
    def get_test_app_images(self) -> dict:
        """Return a dict of appearance images used at test time; implemented per dataset."""
        pass
| 36,611 | 46.119691 | 255 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/human36m_preprocess.py | #code heavily borrowed from https://github.com/anibali/h36m-fetch
from subprocess import call
from os import path, makedirs
import hashlib
from tqdm import tqdm
import configparser
import requests
import tarfile
from glob import glob
BASE_URL = 'http://vision.imar.ro/human3.6m/filebrowser.php'
# Human3.6M subjects to fetch: (subject name, server-side archive id used in
# the "SubjectSpecific_<id>.tgz" filename — note ids are not in name order).
subjects = [
    ('S1', 1),
    ('S5', 6),
    ('S6', 7),
    ('S7', 2),
    ('S8', 3),
    ('S9', 4),
    ('S11', 5),
]
def md5(filename):
    """Return the hex MD5 digest of *filename*, reading it in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def download_file(url, dest_file, phpsessid):
    """Fetch *url* to *dest_file* via axel (24 connections), authenticating
    with the Human3.6M session cookie *phpsessid*."""
    cmd = [
        'axel',
        '-a',
        '-n', '24',
        '-H', 'COOKIE: PHPSESSID=' + phpsessid,
        '-o', dest_file,
        url,
    ]
    call(cmd)
def get_config():
    """Parse and return the config.ini located next to this script."""
    here = path.dirname(path.realpath(__file__))
    parser = configparser.ConfigParser()
    parser.read(path.join(here, 'config.ini'))
    return parser
def get_phpsessid(config):
    """Read the PHPSESSID cookie from *config*, prompting interactively if absent."""
    try:
        return config['General']['PHPSESSID']
    except (KeyError, configparser.NoSectionError):
        # Fall back to asking the user directly.
        print('Could not read PHPSESSID from `config.ini`.')
        return input('Enter PHPSESSID: ')
def verify_phpsessid(phpsessid):
    """Assert that *phpsessid* belongs to a logged-in Human3.6M session.

    A stale/invalid session gets redirected away from the file browser,
    which is detected by comparing the final response URL.
    """
    requests.packages.urllib3.disable_warnings()
    test_url = 'http://vision.imar.ro/human3.6m/filebrowser.php'
    response = requests.get(test_url, verify=False, cookies=dict(PHPSESSID=phpsessid))
    fail_message = ('Failed to verify your PHPSESSID. Please ensure that you '
                    'are currently logged in at http://vision.imar.ro/human3.6m/ '
                    'and that you have copied the PHPSESSID cookie correctly.')
    assert response.url == test_url, fail_message
def download_all(phpsessid, out_dir):
    """Download every subject's Videos_*.tgz archive into *out_dir*.

    Archives already present are kept only when their MD5 matches the
    expected value from checksums.txt; otherwise they are re-downloaded.
    """
    # Expected checksums, keyed by archive filename ("<md5>  <filename>" lines).
    checksums = {}
    dirpath = path.dirname(path.realpath(__file__))
    with open(path.join(dirpath,'checksums.txt'), 'r') as f:
        for line in f.read().splitlines(keepends=False):
            v, k = line.split('  ')
            checksums[k] = v
    files = []
    for subject_id, id in subjects:
        files += [
            ('Videos_{}.tgz'.format(subject_id),
             'download=1&filepath=Videos&filename=SubjectSpecific_{}.tgz'.format(id)),
        ]
    for filename, query in tqdm(files, ascii=True):
        out_file = path.join(out_dir, filename)
        if path.isfile(out_file):
            # BUGFIX: this verification used to be dead code — an unconditional
            # `continue` under an identical isfile() check preceded it — and the
            # checksum table was probed with the full path instead of the bare
            # filename used as key in checksums.txt.
            checksum = md5(out_file)
            if checksums.get(filename, None) == checksum:
                continue
        download_file(BASE_URL + '?' + query, out_file, phpsessid)
# https://stackoverflow.com/a/6718435
def commonprefix(m):
    """Return the longest common prefix of the sequences in *m*.

    Works for strings as well as lists of path components; relies on the
    fact that the prefix shared by the lexicographic min and max of *m*
    is shared by every element.
    """
    lo, hi = min(m), max(m)
    for i, ch in enumerate(lo):
        if ch != hi[i]:
            return lo[:i]
    return lo
def extract_tgz(tgz_file, dest):
    """Extract *tgz_file* into *dest*, stripping the longest common leading
    directory from all member paths so the payload lands directly in dest."""
    # if path.exists(dest):
    #     return
    with tarfile.open(tgz_file, 'r:gz') as tar:
        # Only regular files; their parent directories are created on extract.
        members = [m for m in tar.getmembers() if m.isreg()]
        member_dirs = [path.dirname(m.name).split(path.sep) for m in members]
        base_path = path.sep.join(commonprefix(member_dirs))
        for m in members:
            m.name = path.relpath(m.name, base_path)
        # NOTE(review): extractall on untrusted archives allows path traversal
        # (CVE-2007-4559); acceptable here only because archives come from the
        # official Human3.6M server — confirm before reusing elsewhere.
        tar.extractall(dest)
def extract(out_dir, tgzs):
    """Unpack each Videos_<subject>.tgz archive into <out_dir>/videos/<subject>."""
    video_root = path.join(out_dir, 'videos')
    for archive in tqdm(tgzs, desc='Extracting tgz archives'):
        # Archive names look like Videos_S1.tgz -> subject id "S1".
        subject = archive.split('_')[-1].split('.')[0]
        target = path.join(video_root, subject)
        makedirs(target, exist_ok=True)
        extract_tgz(archive, target)
if __name__ == '__main__':
    # End-to-end pipeline: read config, validate the session cookie, download
    # all subject archives, then extract them under TARGETDIR/videos/<subject>.
    config = get_config()
    phpsessid = get_phpsessid(config)
    verify_phpsessid(phpsessid)
    out_dir = config['General']['TARGETDIR']
    download_dir = path.join(out_dir,'video_download')
    makedirs(download_dir,exist_ok=True)
    download_all(phpsessid,out_dir=download_dir)
    tgzs = glob(path.join(download_dir,'*.tgz'))
    extract(out_dir,tgzs)
| 4,081 | 27.347222 | 86 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/__init__.py | from data.base_dataset import BaseDataset
from torchvision import transforms as tt
from data.flow_dataset import PlantDataset, IperDataset,Human36mDataset, VegetationDataset, LargeVegetationDataset, TaichiDataset
# Registry mapping config "dataset" names to dataset classes; add new key/value
# pairs for datasets here — all of them should inherit from BaseDataset.
__datasets__ = {"IperDataset": IperDataset,
                "PlantDataset": PlantDataset,
                "Human36mDataset": Human36mDataset,
                "VegetationDataset": VegetationDataset,
                "LargeVegetationDataset": LargeVegetationDataset,
                "TaichiDataset": TaichiDataset,
                }
# NOTE: get_dataset below returns only the dataset class, not yet an instance
def get_transforms(config):
    """Return the per-dataset image transform (image -> tensor scaled to [-1, 1]).

    *config* is accepted for interface compatibility but currently unused.
    All datasets share the same normalization; the mapping is kept per-name
    so individual datasets can override their transform later.
    """
    # One shared pipeline instead of six identical copies (the transforms are
    # stateless, so sharing a single Compose instance is safe).
    to_normalized_tensor = tt.Compose(
        [
            tt.ToTensor(),
            tt.Lambda(lambda x: (x * 2.0) - 1.0),
        ]
    )
    return {name: to_normalized_tensor for name in __datasets__}
def get_dataset(config, custom_transforms=None):
    """Look up the dataset class for config["dataset"] and its transform.

    Returns the class itself (not an instance) together with the transform;
    *custom_transforms*, when given, overrides the default one.
    """
    dataset_cls = __datasets__[config["dataset"]]
    if custom_transforms is None:
        transforms = get_transforms(config)[config["dataset"]]
    else:
        print("Returning dataset with custom transform")
        transforms = custom_transforms
    return dataset_cls, transforms
| 2,041 | 29.029412 | 129 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/prepare_dataset.py | import os
import cv2
import re
import argparse
import torch
import numpy as np
from os import path, makedirs
import pickle
from tqdm import tqdm
from glob import glob
from natsort import natsorted
import yaml
import multiprocessing as mp
from multiprocessing import Process
from functools import partial
from dotmap import DotMap
from torchvision import transforms as tt
import configparser
from utils.general import parallel_data_prefetch
from data import get_dataset
from data.helper_functions import preprocess_image
# Human3.6M action name -> integer action id. The update() calls register the
# alternative spellings that appear in some file names, mapping them onto the
# id of their canonical action.
h36m_aname2aid = {name: i for i, name in enumerate(["Directions","Discussion","Eating","Greeting","Phoning",
                                                    "Posing","Purchases","Sitting","SittingDown","Smoking",
                                                    "Photo","Waiting","Walking","WalkDog","WalkTogether"])}
h36m_aname2aid.update({"WalkingTogether": h36m_aname2aid["WalkTogether"]})
h36m_aname2aid.update({"WalkingDog": h36m_aname2aid["WalkDog"]})
h36m_aname2aid.update({"TakingPhoto": h36m_aname2aid["Photo"]})
def _do_parallel_data_prefetch(func, Q, data, idx):
    """Worker entry point: apply *func* to *data* and push the result.

    Puts [idx, result] on *Q* so the parent can reassemble ordered output,
    followed by the "Done" sentinel that signals this worker has finished.
    """
    result = func(data)
    Q.put([idx, result])
    Q.put("Done")
def get_image(vidcap, frame_number, spatial_size=None):
    """Seek *vidcap* to *frame_number* and return that frame.

    When *spatial_size* is given and differs from the frame height, the
    frame is bilinearly resized to (spatial_size, spatial_size).
    """
    vidcap.set(1, frame_number)  # property id 1 == cv2.CAP_PROP_POS_FRAMES
    _, frame = vidcap.read()
    if spatial_size is not None and spatial_size != frame.shape[0]:
        frame = cv2.resize(frame, (spatial_size, spatial_size), interpolation=cv2.INTER_LINEAR)
    return frame
def process_video(f_name, args):
    """Extract frames and Flownet optical-flow maps for one or more videos.

    Runs on the GPU with the lowest current memory usage. For every frame
    (stride args.frames_discr) writes the frame as PNG and, for each lag up
    to args.flow_max (step args.flow_delta), a flow prediction as .flow.npy.
    Existing files are skipped, so the function is resumable.
    Returns the string "Finish".
    """
    # Imported lazily so the module can be imported without GPU dependencies.
    from utils.flownet_loader import FlownetPipeline
    from utils.general import get_gpu_id_with_lowest_memory, get_logger
    target_gpus = None if len(args.target_gpus) == 0 else args.target_gpus
    gpu_index = get_gpu_id_with_lowest_memory(target_gpus=target_gpus)
    torch.cuda.set_device(gpu_index)
    #f_name = vid_path.split(vid_path)[-1]
    logger = get_logger(f"{gpu_index}")
    extract_device = torch.device("cuda", gpu_index.index if isinstance(gpu_index,torch.device) else gpu_index)
    # load flownet
    pipeline = FlownetPipeline()
    flownet = pipeline.load_flownet(args, extract_device)
    # open video
    base_raw_dir = args.raw_dir.split("*")[0]
    if not isinstance(f_name,list):
        f_name = [f_name]
    logger.info(f"Iterating over {len(f_name)} files...")
    for fn in tqdm(f_name,):
        if fn.startswith('/'):
            fn = fn[1:]
        vid_path = path.join(base_raw_dir, fn)
        # vid_path = f"Code/input/train_data/movies/{fn}"
        vidcap = cv2.VideoCapture()
        vidcap.open(vid_path)
        counter = 0
        # NOTE(review): relies on module-level `time`, which is only imported
        # inside this module's __main__ guard — verify it resolves in workers
        # spawned via multiprocessing "spawn".
        while not vidcap.isOpened():
            counter += 1
            time.sleep(1)
            if counter > 10:
                raise Exception("Could not open movie")
        # get some metadata
        number_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
        height = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        width = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
        #upright = height > widt
        # create target path if not existent
        if args.data.dataset == 'Human36mDataset':
            # Human3.6M layout: <processed_dir>/<subject>/<action>-<subaction>/<camera>.
            vid_name = fn.split('/')[-1]
            if 'ALL' in vid_name:
                continue
            action = vid_name.split(' ')[0] if ' ' in vid_name else vid_name.split('.')[0]
            same_action_videos =list(filter(lambda y : y.startswith(action) and re.search(r'\d+$', y.split('.')[0]) is not None,
                                            map(lambda x: x.split('/')[-1],f_name)))
            subject = fn.split('/')[-2]
            # Infer the sub-action id: explicitly numbered files carry it in the
            # name; un-numbered ones get the id not taken by their siblings.
            if re.search(r'\d+$', fn.split('.')[0]) is not None:
                subaction_id = int(fn[-1])
            else:
                max_id = max(map(lambda z: int(z.split(' ')[-1].split('.')[0]), same_action_videos))
                if max_id ==2:
                    subaction_id = 1
                else:
                    subaction_id = 2
            cam_id = vid_name.split('.')[1]
            base_path = path.join(args.processed_dir,subject,f'{action}-{subaction_id}',cam_id)
        else:
            base_path = path.join(args.processed_dir, fn.split(".")[0]) #.replace(str,str(args.spatial_size)))
        # base_path = f"Code/input/train_data/images/{f_name.split('.')[0]}/"
        makedirs(base_path, exist_ok=True)
        delta = args.flow_delta
        diff = args.flow_max
        # begin extraction
        for frame_number in range(0, number_frames, args.frames_discr):
            # break if not enough frames to properly extract sequence
            if frame_number >= number_frames - diff * args.frames_discr:
                break
            first_fidx, second_fidx = frame_number, frame_number + diff * args.frames_discr
            image_target_file = path.join(base_path, f"frame_{frame_number}.png")
            # image_target_file = f"{base_path}frame_{frame_number}.png"
            # FRAME
            if not path.exists(image_target_file):
                # write frame itself
                img = get_image(vidcap, frame_number)
                if img is None:
                    continue
                # if upright:
                #     img = cv2.transpose(img)
                try:
                    if args.spatial_size is None:
                        success = cv2.imwrite(image_target_file, img)
                    else:
                        img_res = cv2.resize(img,(args.spatial_size,args.spatial_size), interpolation=cv2.INTER_LINEAR)
                        success = cv2.imwrite(image_target_file,img_res)
                except cv2.error as e:
                    print(e)
                    continue
                except Exception as ex:
                    print(ex)
                    continue
            # if success:
            #     logger.info(f'wrote img with shape {img.shape} to "{image_target_file}".')
            # FLOW
            # One flow map per lag d in {0, delta, 2*delta, ...} below flow_max.
            for d in range(0, diff*args.frames_discr, delta*args.frames_discr):
                if second_fidx - d < number_frames:
                    flow_target_file = path.join(
                        base_path, f"prediction_{first_fidx}_{second_fidx-d}.flow"
                    )
                    if not os.path.exists(flow_target_file + ".npy"):
                        # predict and write flow prediction
                        img, img2 = (
                            get_image(vidcap, first_fidx),
                            get_image(vidcap, second_fidx - d),
                        )
                        image_target_file2 = path.join(base_path, f"frame_{second_fidx - d}.png")
                        if not path.exists(image_target_file2):
                            try:
                                if args.spatial_size is None:
                                    success = cv2.imwrite(image_target_file2, img2)
                                else:
                                    img_res2 = cv2.resize(img2, (args.spatial_size, args.spatial_size), interpolation=cv2.INTER_LINEAR)
                                    success = cv2.imwrite(image_target_file2, img_res2)
                            except cv2.error as e:
                                print(e)
                                continue
                            except Exception as ex:
                                print(ex)
                                continue
                        sample = pipeline.preprocess_image(img, img2, "BGR",spatial_size=args.input_size).to(
                            extract_device
                        )
                        prediction = (
                            pipeline.predict(flownet, sample[None],spatial_size=args.spatial_size)
                            .cpu()
                            .detach()
                            .numpy()
                        )
                        np.save(flow_target_file, prediction)
        logger.info(
            f'Finish processing video sequence "{fn}".')
    return "Finish"
def extract(args):
    """Fan video extraction out over args.num_workers processes.

    Splits the video file list into contiguous chunks, runs process_video on
    each chunk in its own Process, and drains per-worker results from a
    shared queue until every worker has sent its "Done" sentinel.
    """
    # if args.process_vids:
    base_dir = args.raw_dir.split("*")[0]
    if not args.raw_dir.endswith('*'):
        args.raw_dir =path.join(args.raw_dir,'*')
    # Paths relative to base_dir, filtered by the configured video extension.
    data_names = [p.split(base_dir)[-1] for p in glob(args.raw_dir) if p.endswith(args.video_format)]
    # data_names = [d for d in data_names if d in ['/VID_0_5.mkv','/VID_7_0.mkv']]
    fn_extract = partial(process_video, args=args)
    Q = mp.Queue(1000)
    # step = (
    #     int(len(data_names) / args.num_workers + 1)
    #     if len(data_names) % args.num_workers != 0
    #     else int(len(data_names) / args.num_workers)
    # )
    splits = np.array_split(np.arange(len(data_names)), args.num_workers)
    arguments = [
        [fn_extract, Q, part, i]
        for i, part in enumerate(
            [data_names[s[0]:s[-1]+1] for s in splits]
        )
    ]
    processes = []
    for i in range(args.num_workers):
        p = Process(target=_do_parallel_data_prefetch, args=arguments[i])
        processes += [p]
    start = time.time()
    gather_res = [[] for _ in range(args.num_workers)]
    try:
        for p in processes:
            p.start()
            # Stagger worker start-up to avoid GPU-selection races.
            time.sleep(20)
        k = 0
        # Each worker sends its result followed by a "Done" sentinel.
        while k < args.num_workers:
            # get result
            res = Q.get()
            if res == "Done":
                k += 1
            else:
                gather_res[res[0]] = res[1]
    except Exception as e:
        print("Exception: ", e)
        for p in processes:
            p.terminate()
        raise e
    finally:
        for p in processes:
            p.join()
    print(f"Prefetching complete. [{time.time() - start} sec.]")
def prepare(args):
    """Scan the processed frame/flow directories and write the meta.p index.

    Builds, per frame, the relative image path, the list of flow-map paths
    for all lags, frame/video ids, sizes and dataset-specific ids (object,
    actor, action, train split), then pickles the dict to
    <processed_dir>/meta.p.
    """
    logger = get_logger("dataset_preparation")
    datadict = {
        "img_path": [],
        "flow_paths": [],
        "fid": [],
        "vid": [],
        "img_size": [],
        "flow_size": [],
        "object_id":[],
        "max_fid": []
    }
    # Datasets with per-actor annotations also record action/actor ids.
    if "iPER" in args.processed_dir.split("/") or "human36m" in args.processed_dir.split("/") or \
            "human3.6M" in args.processed_dir.split("/") :
        datadict.update({"action_id": [], "actor_id": []})
    train_test_split = args.data.dataset == 'Human36mDataset' or args.data.dataset == 'TaichiDataset'
    fmax = args.flow_max
    fdelta = args.flow_delta
    fd = args.frames_discr
    if train_test_split:
        datadict.update({"train": []})
    if args.data.dataset == 'TaichiDataset':
        oname2oid = {}
    # logger.info(f'Metafile is stored as "{args.meta_file_name}.p".')
    # logger.info(f"args.check_imgs is {args.check_imgs}")
    max_flow_length = int(fmax / fdelta)
    # if args.process_vids:
    # Directory depth depends on the dataset's on-disk layout.
    if train_test_split:
        if args.data.dataset == 'Human36mDataset':
            videos = [d for d in glob(path.join(args.processed_dir, "*", "*", '*')) if path.isdir(d)]
        else:
            videos = [d for d in glob(path.join(args.processed_dir, "*", "*")) if path.isdir(d)]
    else:
        videos = [d for d in glob(path.join(args.processed_dir, "*")) if path.isdir(d)]
    videos = natsorted(videos)
    actual_oid = 0
    for vid, vid_name in enumerate(videos):
        images = glob(path.join(vid_name, "*.png"))
        images = natsorted(images)
        actor_id = action_id = train = None
        # Dataset-specific parsing of object/actor/action ids from directory names.
        if args.data.dataset == 'PlantDataset':
            object_id = int(vid_name.split("/")[-1].split("_")[1])
        elif args.data.dataset == 'IperDataset':
            object_id = 100 * int(vid_name.split("/")[-1].split("_")[0]) + int(vid_name.split("/")[-1].split("_")[1])
            actor_id = int(vid_name.split("/")[-1].split("_")[0])
            action_id = int(vid_name.split("/")[-1].split("_")[-1])
        elif args.data.dataset == 'TaichiDataset':
            train = "train" == vid_name.split("/")[-2]
            msg = "train" if train else "test"
            print(f"Video in {msg}-split")
            obj_name = vid_name.split("/")[-1].split("#")[0]
            if obj_name in oname2oid.keys():
                object_id = oname2oid[obj_name]
            else:
                object_id = actual_oid
                oname2oid.update({obj_name: actual_oid})
                actual_oid += 1
        elif args.data.dataset == 'Human36mDataset':
            actor_id = int(vid_name.split('/')[-3][1:])
            object_id = actor_id
            action_name = vid_name.split('/')[-2].split('-')[0]
            action_id = h36m_aname2aid[action_name]
            # Subjects 9 and 11 form the standard Human3.6M test split.
            train = actor_id not in [9,11]
        else:
            raise ValueError("invalid dataset....")
        # max_flow_id = [len(images) - flow_step -1 for flow_step in range(fdelta*fd,fmax*fd+1, fdelta*fd)]
        for i, img_path in enumerate(
            tqdm(
                images,
                desc=f'Extracting meta information of video "{vid_name.split(args.processed_dir)[-1]}"',
            )
        ):
            fid = int(img_path.split("_")[-1].split(".")[0])
            #search_pattern = f'[{",".join([str(fid + n) for n in range(args.flow_delta,args.flow_max + 1, args.flow_delta)])}]'
            # Keep only flows whose frame distance is a multiple of the lag step
            # and within the maximum lag.
            flows = natsorted([s for s in glob(path.join(vid_name, f"prediction_{fid}_*.npy"))
                               if (int(s.split("_")[-1].split(".")[0]) - int(s.split("_")[-2])) % (fdelta * fd) == 0 and
                               int(s.split("_")[-1].split(".")[0]) - int(s.split("_")[-2]) <= fmax*fd])
            # skip example if second image path does not exist
            if any(map(lambda p: not path.isfile(path.join(vid_name, f'frame_{p.split("_")[-1].split(".")[0]}.png')),flows)):
                logger.info(f'Breaking meta file information processing earlier for video "{vid_name.split("/")[-1]}", since not all image frames have been extracted.')
                break
            # make relative paths
            img_path_rel = img_path.split(args.processed_dir)[1]
            flows_rel = [f.split(args.processed_dir)[1] for f in flows]
            # filter flows
            flows_rel = [f for f in flows_rel if (int(f.split("/")[-1].split(".")[0].split("_")[-1]) - int(f.split("/")[-1].split(".")[0].split("_")[-2])) <= fmax*fd]
            # Pad short flow lists with entries from the previous frame so every
            # sample has exactly max_flow_length flow paths.
            # NOTE(review): last_flow_paths is unbound if the very first frame of
            # the first video is already short — confirm this cannot happen.
            if len(flows_rel) < max_flow_length:
                diff = max_flow_length-len(flows_rel)
                [flows_rel.insert(len(flows_rel),last_flow_paths[len(flows_rel)]) for _ in range(diff)]
            w_img = args.spatial_size
            h_img = args.spatial_size
            if len(flows) > 0:
                w_f = args.spatial_size
                h_f = args.spatial_size
            else:
                h_f = w_f = None
            assert len(flows_rel) == max_flow_length
            datadict["img_path"].append(img_path_rel)
            datadict["flow_paths"].append(flows_rel)
            datadict["fid"].append(fid)
            datadict["vid"].append(vid)
            # image size compliant with numpy and torch
            datadict["img_size"].append((h_img, w_img))
            datadict["flow_size"].append((h_f, w_f))
            datadict["object_id"].append(object_id)
            # datadict["max_fid"].append(max_flow_id)
            if action_id is not None:
                datadict["action_id"].append(action_id)
            if actor_id is not None:
                datadict["actor_id"].append(actor_id)
            if train is not None:
                datadict["train"].append(train)
            last_flow_paths = flows_rel
    logger.info(f'Prepared dataset consists of {len(datadict["img_path"])} samples.')
    # Store data (serialize)
    save_path = path.join(
        args.processed_dir, "meta.p"
    )
    with open(save_path, "wb") as handle:
        pickle.dump(datadict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def load_flow(flow_paths):
    """Compute per-file flow-magnitude statistics.

    Loads each optical-flow .npy file in *flow_paths*, takes the per-pixel
    L2 norm over the channel axis and records its maximum and minimum.
    Unreadable files are reported and skipped.

    Returns an array of shape (n_loaded, 2) with columns [max_norm, min_norm].
    """
    stats = []
    for flow_path in tqdm(flow_paths):
        try:
            flow = np.load(flow_path)
        except Exception as err:
            print(err)
            continue
        magnitude = np.linalg.norm(flow, 2, 0)
        stats.append(np.stack([np.amax(magnitude), np.amin(magnitude)]))
    return np.stack(stats, 0)
def norms(cfg_dict):
    """Compute global flow-norm statistics and save them as dataset_stats.p.

    For every flow lag, loads all flow maps in parallel, records the global
    max/min magnitude and the deciles of the per-file maxima, and pickles the
    result next to the dataset (used later for flow normalization).
    """
    # Must load raw (un-normalized) flows to measure their true magnitudes.
    cfg_dict['data']['normalize_flows'] = False
    transforms = tt.Compose(
        [tt.ToTensor(), tt.Lambda(lambda x: (x * 2.0) - 1.0)]
    )
    datakeys = ["flow", "images"]
    dataset, _ = get_dataset(config=cfg_dict["data"])
    test_dataset = dataset(transforms, datakeys, cfg_dict["data"], train=True)
    flow_paths = test_dataset.data["flow_paths"]
    stats_dict = {"max_norm": [], "min_norm": [], "percentiles": []}
    # One pass per flow lag (last axis of the flow_paths array).
    for i in range(flow_paths.shape[-1]):
        test_dataset.logger.info(f'Computing mean of flow with lag {(i + 1) * cfg_dict["flow_delta"]}')
        norms = parallel_data_prefetch(load_flow, flow_paths[:, i], cfg_dict['data']['num_workers'])
        max_n = np.amax(norms[:, 0])
        min_n = np.amin(norms[:, 1])
        percs_at = list(range(10, 100, 10))
        percs = np.percentile(norms[:, 0], percs_at)
        stats_dict["percentiles"].append({pa: p for pa, p in zip(percs_at, percs)})
        stats_dict["max_norm"].append(float(max_n))
        stats_dict["min_norm"].append(float(min_n))
    # save
    savepath = path.join(test_dataset.datapath, "dataset_stats.p")
    with open(savepath, "wb") as handle:
        pickle.dump(stats_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
def process_flows(flow_data):
    """Compute a foreground-filtered flow-magnitude range per sample.

    *flow_data* is a list of (flow_path, img_path, lag, dataset) tuples.
    Returns an (N, 3) array with columns [min_value, max_value, error_flag],
    where values are taken from pixels whose normalized flow magnitude lies
    above mean + 2*std (with progressively weaker fallbacks).
    """
    out = np.zeros((len(flow_data), 3))
    for i, dp in enumerate(tqdm(flow_data)):
        flow = np.load(dp[0])
        lag = dp[2]
        test_dataset = dp[3]
        # flow = flow - test_dataset.flow_norms["min_norm"][test_dataset.valid_lags[0]]
        # Normalize magnitudes by the dataset-wide maximum for this lag.
        flow = flow / test_dataset.flow_norms["max_norm"][lag]
        img = cv2.imread(dp[1])
        # image is read in BGR
        img = preprocess_image(img, swap_channels=True)
        mask = np.zeros(img.shape[:2], np.uint8)
        # rect defines starting background area
        if test_dataset.filter_flow:
            rect = (
                int(img.shape[1] / test_dataset.flow_width_factor), test_dataset.valid_h[0], int((test_dataset.flow_width_factor - 2) / test_dataset.flow_width_factor * img.shape[1]),
                test_dataset.valid_h[1] - test_dataset.valid_h[0])
            # initialize background and foreground models
            fgm = np.zeros((1, 65), dtype=np.float64)
            bgm = np.zeros((1, 65), dtype=np.float64)
            # apply grab cut algorithm to separate foreground from background
            mask2, fgm, bgm = cv2.grabCut(img, mask, rect, fgm, bgm, 5, cv2.GC_INIT_WITH_RECT)
        amplitude = np.linalg.norm(flow[:, test_dataset.valid_h[0]:test_dataset.valid_h[1], test_dataset.valid_w[0]:test_dataset.valid_w[1]], 2, axis=0)
        if test_dataset.filter_flow:
            # only consider the part of the mask which corresponds to the region considered in flow
            amplitude_filt = np.where(mask2[test_dataset.valid_h[0]:test_dataset.valid_h[1], test_dataset.valid_w[0]:test_dataset.valid_w[1]], amplitude, np.zeros_like(amplitude))
        else:
            amplitude_filt = amplitude
        std = amplitude_filt.std()
        mean = np.mean(amplitude_filt)
        # Progressively relax the threshold until some pixels survive.
        indices = np.argwhere(np.greater(amplitude_filt, mean + (std * 2.0)))
        if indices.shape[0] == 0:
            indices = np.argwhere(np.greater(amplitude_filt, np.mean(amplitude_filt) + amplitude_filt.std()))
        if indices.shape[0] == 0:
            print("Fallback in Dataloading bacause no values remain after filtering.")
            # there should be at least one element that is above the mean if flows are not entirely equally distributed
            indices = np.argwhere(np.greater(amplitude_filt, mean))
        if indices.shape[0] == 0:
            print("strange case, cannot occure, skip")
            # Flag this sample as erroneous in the last column.
            out[i, -1] = 1
            continue
        values = np.asarray([amplitude_filt[idx[0], idx[1]] for idx in indices])
        out[i, 0] = values.min()
        out[i, 1] = values.max()
    return out
def stats(cfg_dict):
    """Compute per-lag flow ranges and append them to the dataset metafile.

    Runs process_flows in parallel for every lag, stacks the results into a
    (N, 3, n_lags) array under the key "flow_range" and pickles the enlarged
    metadata dict as <metafilename>_frange.p.
    """
    logger=get_logger("stats_calculation")
    cfg_dict['data']['normalize_flows'] = True
    transforms = tt.Compose(
        [tt.ToTensor(), tt.Lambda(lambda x: (x * 2.0) - 1.0)]
    )
    datakeys = ["flow", "images"]
    dataset, _ = get_dataset(config=cfg_dict["data"])
    test_dataset = dataset(transforms, datakeys, cfg_dict["data"], train=True)
    all_frange_data = []
    for l in tqdm(range(test_dataset.data['flow_paths'].shape[-1])):
        logger.info(f'Calculating stats for lag of {(l+1) * cfg_dict["flow_delta"]} frames...')
        in_data = [(f, i, l, test_dataset) for f, i in zip(test_dataset.data["flow_paths"][:, l], test_dataset.data["img_path"])]
        # NOTE(review): only the first 100 samples per lag are processed here
        # ([:100]) — looks like leftover debugging; confirm before relying on it.
        out_data = parallel_data_prefetch(process_flows,in_data[:100], n_proc=20, cpu_intensive=True, target_data_type="list")
        all_frange_data.append(out_data)
        n_error = np.count_nonzero(out_data[:, 2])
        logger.info(f"While loading the data, {n_error} errors occurred.")
    all_frange_data = np.stack(all_frange_data,axis=-1)
    assert all_frange_data.shape[-1] == test_dataset.datadict['flow_paths'].shape[-1]
    with open(path.join(test_dataset.datapath,f"{test_dataset.metafilename}.p"),"rb") as f:
        datadict = pickle.load(f)
    #assert out_data.shape[0] == len(datadict["img_path"])
    key = "flow_range"
    name_key = "frange"
    datadict.update({key: all_frange_data})
    with open(path.join(test_dataset.datapath, f"{test_dataset.metafilename}_{name_key}.p"), "wb") as f:
        pickle.dump(datadict, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
    # These names are also used by functions defined above; under
    # multiprocessing "spawn" child processes re-import this module without
    # executing this guard, so those names may be missing there.
    import time
    from utils.general import get_logger
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config',type=str,required=True,help='Config file containing all parameters.')
    config_args = parser.parse_args()
    fpath = path.dirname(path.realpath(__file__))
    # Config path is resolved relative to the repository root (one level up).
    configfile = path.abspath(path.join(fpath,f'../{config_args.config}'))
    with open(configfile,'r') as f:
        args = yaml.load(f,Loader=yaml.FullLoader)
    cfg_dict = args
    args = DotMap(args)
    # NOTE(review): the dataset key used elsewhere is 'Human36mDataset';
    # 'Human3.6mDataset' here may never match — confirm which one is intended.
    if args.data.dataset == 'Human3.6mDataset':
        h36config = configparser.ConfigParser()
        h36config.read(path.join(fpath, 'config.ini'))
        args.raw_dir = path.join(h36config['General']['TARGETDIR'], 'videos','*','*')
    cfg_dict['data']['datapath'] = args.processed_dir
    if args.raw_dir == '':
        raise ValueError(f'The data holding directory is currently not defined. please define the field "raw_dir" in "{config_args.config}"')
    if args.processed_dir == '':
        raise ValueError(f'The target directory for the extracted image frames and flow maps is currently undefined. Please define the field "processed_dir" in "{config_args.config}"')
    pool = []
    torch.multiprocessing.set_start_method("spawn")
    # Dispatch on the requested pipeline stage.
    if args.mode == "extract":
        extract(args)
    elif args.mode == "prepare": # in this case, it is prepare
        prepare(args)
    elif args.mode == 'stats':
        # stats(cfg_dict)
        raise NotImplementedError()
    elif args.mode == 'norms':
        # norms(cfg_dict)
        raise NotImplementedError()
    elif args.mode == 'all':
        extract(args)
        prepare(args)
        # norms(cfg_dict)
        # stats(cfg_dict)
    else:
        raise ValueError(f'The "mode"-parameter in config file "{configfile}" must be in [all, extract, prepare, norms, stats], but is actually "{args.mode}"...')
| 23,809 | 36.974482 | 185 | py |
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/samplers.py | import numpy as np
from torch.utils.data import BatchSampler,RandomSampler,SequentialSampler, WeightedRandomSampler
from data.base_dataset import BaseDataset
from data.flow_dataset import PlantDataset
class SequenceSampler(BatchSampler):
    """Batch sampler that attaches one shared temporal lag to each batch.

    Yields batches of (dataset_index, lag) tuples; the lag is redrawn from
    dataset.valid_lags for every new batch so all samples in a batch share
    the same lag.
    """

    def __init__(self, dataset:BaseDataset, batch_size, shuffle, drop_last):
        assert isinstance(dataset, BaseDataset), "The used dataset in Sequence Sampler must inherit from BaseDataset"
        base_sampler = RandomSampler(dataset) if shuffle else SequentialSampler(dataset)
        super().__init__(base_sampler, batch_size, drop_last)
        self.dataset = dataset

    def _draw_lag(self):
        # One lag per batch keeps sequence lengths consistent within the batch.
        return int(np.random.choice(self.dataset.valid_lags, 1))

    def __iter__(self):
        current = []
        lag = self._draw_lag()
        for index in self.sampler:
            current.append((index, lag))
            if len(current) == self.batch_size:
                yield current
                current = []
                lag = self._draw_lag()
        if current and not self.drop_last:
            yield current
class FixedLengthSampler(BatchSampler):
    """Batch sampler for fixed-length sequences with optional zero pokes.

    Yields batches of dataset indices; when zero_poke is enabled, a randomly
    chosen fraction (1/zero_poke_amount) of indices per epoch is replaced by
    the sentinel -1, which the dataset interprets as a zero-poke sample.
    """
    def __init__(self, dataset:PlantDataset,batch_size,shuffle,drop_last, weighting, zero_poke,zero_poke_amount=None):
        # Optionally sample with per-example weights from the dataset metadata.
        if shuffle:
            if weighting:
                sampler = WeightedRandomSampler(weights=dataset.datadict["weights"],num_samples=len(dataset))
            else:
                sampler = RandomSampler(dataset)
        else:
            sampler = SequentialSampler(dataset)
        super().__init__(sampler, batch_size, drop_last)
        self.shuffle = shuffle
        self.dataset = dataset
        self.zero_poke = zero_poke
        self.zero_poke_amount = zero_poke_amount
        if self.zero_poke:
            assert self.zero_poke_amount is not None
    def __iter__(self):
        """Yield index batches, substituting -1 for the sampled zero-poke ids."""
        batch = []
        if self.zero_poke:
            # sample a certain proportion to be zero pokes
            zero_poke_ids = np.random.choice(np.arange(self.dataset.__len__()),size=int(self.dataset.__len__()/ self.zero_poke_amount),replace=False).tolist()
            self.dataset.logger.info(f"Sampling {len(zero_poke_ids)} zeropokes for next epoch")
        else:
            zero_poke_ids = []
        for idx in self.sampler:
            if idx in zero_poke_ids:
                batch.append(-1)
            else:
                batch.append(idx)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
        # Emit the final partial batch unless drop_last is requested.
        if len(batch) > 0 and not self.drop_last:
            yield batch
class SequenceLengthSampler(BatchSampler):
    """Batch sampler for variable-length sequence training.

    Yields batches of (n_frames, n_frames_actual) tuples where n_frames is a
    length offset relative to dataset.min_frames shared by all samples of a
    batch. n_frames == -1 encodes a zero-poke batch; n_frames_actual then
    carries the randomly drawn real length, otherwise it is None.
    """
    def __init__(self, dataset:BaseDataset, batch_size, shuffle, drop_last, n_frames=None, zero_poke = False,):
        assert isinstance(dataset, BaseDataset), "The used dataset in Sequence Sampler must inherit from BaseDataset"
        assert dataset.var_sequence_length and dataset.yield_videos, "The dataset has to be run in sequence mode and has to output variable sequence lengths"
        sampler = SequentialSampler(dataset)
        super().__init__(sampler, batch_size, drop_last)
        self.dataset = dataset
        self.shuffle = shuffle
        if n_frames is not None:
            # Fixed length requested: validate and store it as an offset.
            assert n_frames >= self.dataset.min_frames and n_frames <=(self.dataset.min_frames + self.dataset.max_frames)
            self.n_frames = (n_frames-self.dataset.min_frames)
        else:
            self.n_frames = n_frames
        # -1 marks zero-poke batches; it only occurs when zero_poke is enabled.
        self.start_n_frames = -1 if zero_poke else 0
        # Sampling weights over the candidate lengths (plus the zero-poke slot).
        if zero_poke:
            if self.dataset.train:
                self.len_p = np.asarray([self.dataset.zeropoke_weight] + [1.] * self.dataset.max_frames)
            else:
                self.len_p = np.asarray([1.] * (self.dataset.max_frames + 1))
        else:
            self.len_p = np.asarray([1.] * self.dataset.max_frames)
        # Idiom fix: compare against None with "is not", not "!=".
        if self.dataset.longest_seq_weight is not None and self.dataset.train:
            self.len_p[-1] = self.dataset.longest_seq_weight
            if zero_poke:
                # to keep sufficient outside pokes for the model to learn foreground and background
                self.len_p[0] = self.dataset.longest_seq_weight / 2
        self.len_p = self.len_p /self.len_p.sum()
    def __iter__(self):
        """Yield batches of (n_frames, n_frames_actual); length is redrawn per batch."""
        batch = []
        # sample sequence length
        if self.shuffle:
            # -1 corresponds to a zero-poke batch
            n_frames = int(np.random.choice(np.arange(self.start_n_frames,self.dataset.max_frames), 1, p=self.len_p))
        else:
            # Deterministic mode cycles through the lengths in order.
            last_n = self.start_n_frames
            n_frames = last_n
        if n_frames == -1:
            n_frames_actual = int(np.random.choice(np.arange(self.dataset.max_frames), 1))
            appended = (n_frames, n_frames_actual)
        else:
            appended = (n_frames, None)
        for idx in self.sampler:
            # A fixed self.n_frames (if set) overrides the sampled length.
            appended = (appended[0] if self.n_frames is None else self.n_frames,appended[1])
            batch.append(appended)
            if len(batch) == self.batch_size:
                yield batch
                batch = []
                # sample sequence length
                if self.shuffle:
                    n_frames = int(np.random.choice(np.arange(self.start_n_frames,self.dataset.max_frames), 1,p=self.len_p))
                else:
                    n_frames = last_n+1 if last_n<self.dataset.max_frames-1 else self.start_n_frames
                    last_n = n_frames
                if n_frames == -1:
                    n_frames_actual = int(np.random.choice(np.arange(self.dataset.max_frames), 1))
                    appended = (n_frames, n_frames_actual)
                else:
                    appended = (n_frames, None)
        if len(batch) > 0 and not self.drop_last:
            yield batch
interactive-image2video-synthesis | interactive-image2video-synthesis-main/data/download_taichi.py | import numpy as np
import pandas as pd
import imageio
import os
import subprocess
from multiprocessing import Pool
from itertools import cycle
import warnings
import glob
import time
from tqdm import tqdm
from argparse import ArgumentParser
from skimage import img_as_ubyte
from skimage.transform import resize
warnings.filterwarnings("ignore")
DEVNULL = open(os.devnull, 'wb')
def save(path, frames, format):
    """Write *frames* to *path*; only the '.mp4' container is supported."""
    if format != '.mp4':
        raise ValueError('Unsupported output format.')
    imageio.mimsave(path, frames)
def download(video_id, args):
    """Fetch the YouTube video *video_id* (best mp4 + English subtitles) into
    args.video_folder using the downloader binary at args.youtube.
    Returns the expected local path of the video file."""
    video_path = os.path.join(args.video_folder, video_id + ".mp4")
    cmd = [args.youtube, '-f', "''best/mp4''", '--write-auto-sub', '--write-sub',
           '--sub-lang', 'en', '--skip-unavailable-fragments',
           "https://www.youtube.com/watch?v=" + video_id, "--output",
           video_path]
    # Downloader chatter is discarded via the module-level DEVNULL handle.
    subprocess.call(cmd, stdout=DEVNULL, stderr=DEVNULL)
    return video_path
def run(data):
    """Download one video and cut out the annotated chunks.

    data: (video_id, args) tuple; video_id may carry '#'-separated parts,
    the part before the first '#' is the actual YouTube id.
    Each chunk from the metadata CSV is cropped (bbox rescaled from the
    reference resolution to the actual frame size), optionally resized,
    and saved under out_folder/<partition>/.
    """
    video_id, args = data
    if not os.path.exists(os.path.join(args.video_folder, video_id.split('#')[0] + '.mp4')):
        download(video_id.split('#')[0], args)
    if not os.path.exists(os.path.join(args.video_folder, video_id.split('#')[0] + '.mp4')):
        print('Can not load video %s, broken link' % video_id.split('#')[0])
        return
    reader = imageio.get_reader(os.path.join(args.video_folder, video_id.split('#')[0] + '.mp4'))
    fps = reader.get_meta_data()['fps']
    df = pd.read_csv(args.metadata)
    df = df[df['video_id'] == video_id]
    # one dict per annotated chunk; bbox stored as left-top-right-bottom
    all_chunks_dict = [{'start': df['start'].iloc[j], 'end': df['end'].iloc[j],
                        'bbox': list(map(int, df['bbox'].iloc[j].split('-'))), 'frames': []} for j in range(df.shape[0])]
    ref_fps = df['fps'].iloc[0]
    ref_height = df['height'].iloc[0]
    ref_width = df['width'].iloc[0]
    partition = df['partition'].iloc[0]
    try:
        for i, frame in enumerate(reader):
            for entry in all_chunks_dict:
                # compare timestamps in a common unit (frame idx * ref fps)
                if (i * ref_fps >= entry['start'] * fps) and (i * ref_fps < entry['end'] * fps):
                    left, top, right, bot = entry['bbox']
                    # rescale bbox from reference resolution to actual frame size
                    left = int(left / (ref_width / frame.shape[1]))
                    top = int(top / (ref_height / frame.shape[0]))
                    right = int(right / (ref_width / frame.shape[1]))
                    bot = int(bot / (ref_height / frame.shape[0]))
                    crop = frame[top:bot, left:right]
                    if args.image_shape is not None:
                        crop = img_as_ubyte(resize(crop, args.image_shape, anti_aliasing=True))
                    entry['frames'].append(crop)
    except imageio.core.format.CannotReadFrameError:
        # truncated/corrupt video: keep whatever frames were read so far
        None
    for entry in all_chunks_dict:
        if 'person_id' in df:
            first_part = df['person_id'].iloc[0] + "#"
        else:
            first_part = ""
        first_part = first_part + '#'.join(video_id.split('#')[::-1])
        path = first_part + '#' + str(entry['start']).zfill(6) + '#' + str(entry['end']).zfill(6) + '.mp4'
        save(os.path.join(args.out_folder, partition, path), entry['frames'], args.format)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--video_folder", default='youtube-taichi', help='Path to youtube videos')
parser.add_argument("--metadata", default='data/taichi-metadata.csv', help='Path to metadata')
parser.add_argument("--out_folder", default='taichi', help='Path to output')
parser.add_argument("--format", default='.mp4', help='Storing format')
parser.add_argument("--workers", default=1, type=int, help='Number of workers')
parser.add_argument("--youtube", default='./youtube-dl', help='Path to youtube-dl')
parser.add_argument("--image_shape", default=(256, 256), type=lambda x: tuple(map(int, x.split(','))),
help="Image shape, None for no resize")
args = parser.parse_args()
if not os.path.exists(args.video_folder):
os.makedirs(args.video_folder)
if not os.path.exists(args.out_folder):
os.makedirs(args.out_folder)
for partition in ['test', 'train']:
if not os.path.exists(os.path.join(args.out_folder, partition)):
os.makedirs(os.path.join(args.out_folder, partition))
df = pd.read_csv(args.metadata)
video_ids = set(df['video_id'])
pool = Pool(processes=args.workers)
args_list = cycle([args])
for chunks_data in tqdm(pool.imap_unordered(run, zip(video_ids, args_list))):
None
| 4,549 | 40.363636 | 121 | py |
FOLD-R-PP | FOLD-R-PP-main/main.py | from foldrpp import *
from datasets import *
from timeit import default_timer as timer
from datetime import timedelta
def main():
# model, data = acute()
# model, data = autism()
# model, data = breastw()
model, data = cars()
# model, data = credit()
# model, data = heart()
# model, data = kidney()
# model, data = krkp()
# model, data = mushroom()
# model, data = sonar()
# model, data = voting()
# model, data = ecoli()
# model, data = ionosphere()
# model, data = wine()
# model, data = adult()
# model, data = credit_card()
# model, data = rain()
# model, data = heloc()
# model, data = parkison()
data_train, data_test = split_data(data, ratio=0.8, rand=True)
# line 28: 80% as training data, 20% as test data. shuffle data first when rand is True
# model, data_train, data_test = titanic()
# model, data_train, data_test = avila()
# model, data_train, data_test = anneal()
X_train, Y_train = split_xy(data_train) # split data into features and label
X_test, Y_test = split_xy(data_test)
start = timer()
model.fit(X_train, Y_train, ratio=0.5)
# line 39: ratio means # of exception examples / # of default examples a rule can imply = 0.5
end = timer()
model.print_asp(simple=True)
# line 43: output simplified rules when simple is True, default value is False
Y_test_hat = model.predict(X_test)
acc, p, r, f1 = get_scores(Y_test_hat, Y_test)
print('% acc', round(acc, 4), 'p', round(p, 4), 'r', round(r, 4), 'f1', round(f1, 4))
print('% foldr++ costs: ', timedelta(seconds=end - start), '\n')
# k = 1
# for i in range(10):
# print('Explanation for example number', k, ':')
# print(model.explain(X_test[i], all_flag=False))
# print('Proof Trees for example number', k, ':')
# print(model.proof(X_test[i], all_flag=False))
# k += 1
if __name__ == '__main__':
main()
| 1,983 | 31.52459 | 99 | py |
FOLD-R-PP | FOLD-R-PP-main/example.py |
# python3 -m pip install foldrpp
from foldrpp import *
from timeit import default_timer as timer
from datetime import timedelta
def main():
attrs = ['age', 'workclass', 'fnlwgt', 'education', 'education_num', 'marital_status', 'occupation', 'relationship',
'race', 'sex', 'capital_gain', 'capital_loss', 'hours_per_week', 'native_country']
nums = ['age', 'fnlwgt', 'education_num', 'capital_gain', 'capital_loss', 'hours_per_week']
model = Classifier(attrs=attrs, numeric=nums, label='label', pos='<=50K')
data = model.load_data('data/adult/adult.csv')
print('\n% adult dataset', len(data), len(data[0]))
data_train, data_test = split_data(data, ratio=0.8, rand=True)
X_train, Y_train = split_xy(data_train)
X_test, Y_test = split_xy(data_test)
start = timer()
model.fit(X_train, Y_train, ratio=0.5)
end = timer()
save_model_to_file(model, 'example.model')
# model.print_asp(simple=True)
Y_test_hat = model.predict(X_test)
acc, p, r, f1 = get_scores(Y_test_hat, Y_test)
print('% acc', round(acc, 4), 'p', round(p, 4), 'r', round(r, 4), 'f1', round(f1, 4))
print('% foldr++ costs: ', timedelta(seconds=end - start), '\n')
model2 = load_model_from_file('example.model')
model2.print_asp(simple=True)
# k = 1
# for i in range(10):
# print('Explanation for example number', k, ':')
# print(model.explain(X_test[i], all_flag=False))
# print('Proof Trees for example number', k, ':')
# print(model.proof(X_test[i], all_flag=False))
# k += 1
if __name__ == '__main__':
main()
| 1,616 | 32.6875 | 120 | py |
FOLD-R-PP | FOLD-R-PP-main/translate_utils.py | # This file only provide two functions "translate_rules" and "translate_proof"
# if you need to use these two functions:
# ------------------------------------------------------------
# from translate_utils import translate_rules, translate_proof
# print(translate_rules(model, file='path to template'))
# for x in X:
# print(translate_proof(model, x, file='path to template'))
# ------------------------------------------------------------
# Please find more examples in main and titanic_test functions in this file.
from algo import evaluate, justify_one
from foldrpp import *
from datasets import *
from timeit import default_timer as timer
from datetime import timedelta
def load_template(file):
    """Parse a template file of '#pred head(Args) :: text' directives.

    Builds a dict keyed by (predicate, 'parameter'[, constant]) and
    (predicate, 'phrase'[, constant]).  A second argument that is not an
    uppercase variable name is treated as a concrete constant and yields
    value-specific entries.  Quoted phrase text has its quotes stripped.

    Fix over the original: the file handle is now closed deterministically
    via a context manager.
    """
    ret = dict()
    with open(file, 'r') as f:
        for line in f.readlines():
            line = line.strip('\n')
            if len(line) == 0:
                continue
            if line[0] == '#' and 'pred' in line:
                strs = line.split('pred')[1]
                strs = strs.split('::')
                head = strs[0].strip(' ')
                tail = strs[1].strip(' ')
                heads = head.split('(')
                k, paras = heads[0].lower().replace(' ', '_'), heads[1].strip(')').split(',')
                if len(paras) > 1 and isinstance(paras[1], str) and not ('A' <= paras[1] <= 'Z'):
                    # second argument is a concrete constant -> value-specific entry
                    ret[(k, 'parameter', paras[1])] = paras
                    if tail[0] == '\'':
                        tails = tail.split('\'')
                        tail = tails[1]
                    elif tail[0] == '\"':
                        tails = tail.split('\"')
                        tail = tails[1]
                    ret[(k, 'phrase', paras[1])] = tail
                else:
                    # second argument is a variable (or absent) -> generic entry
                    ret[(k, 'parameter')] = paras
                    if tail[0] == '\'':
                        tails = tail.split('\'')
                        tail = tails[1]
                    elif tail[0] == '\"':
                        tails = tail.split('\"')
                        tail = tails[1]
                    ret[(k, 'phrase')] = tail
    return ret
def translate(rules, attrs, tmpl={}):
ret = []
nr = {'<=': '>', '>': '<=', '==': '!=', '!=': '=='}
def _f0(i, r, v):
k = attrs[i].lower().replace(' ', '_')
if isinstance(v, str):
v = v.lower().replace(' ', '_')
v = 'null' if len(v) == 0 else v
if isinstance(v, str) and (k, 'parameter', v) in tmpl:
para, s = tmpl[(k, 'parameter', v)], tmpl[(k, 'phrase', v)]
elif (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
else:
if r == '==':
s = 'the value of ' + k + ' is \'' + v + '\''
elif r == '!=':
s = 'the value of ' + k + ' is not \'' + v + '\''
else:
if r == '<=':
s = 'the value of ' + k + ' is less equal to ' + str(round(v, 3))
else:
s = 'the value of ' + k + ' is greater than ' + str(round(v, 3))
return s
if len(para) < 2:
s = s.replace('@(' + para[0] + ')', 'X')
if r == '!=':
s = ' not ' + s
return s
else:
if r == '==':
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(v))
return s
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', 'N' + str(i))
if r == '!=':
s = s + ' where N' + str(i) + ' is not ' + v
elif r == '<=':
s = s + ' where N' + str(i) + ' is less equal to ' + str(round(v, 3))
else:
s = s + ' where N' + str(i) + ' is greater than ' + str(round(v, 3))
return s
def _f1(it):
if isinstance(it, tuple) and len(it) == 3:
i, r, v = it[0], it[1], it[2]
if i < -1:
i = -i - 2
r = nr[r]
return _f0(i, r, v)
elif it == -1:
heads = attrs[-1].split('(')
k, paras = heads[0].lower().replace(' ', '_'), heads[1].replace('\'', '').strip(')').split(',')
if len(paras) < 2 and (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
s = s.replace('@(' + para[0] + ')', 'X')
else:
v = paras[1]
if (k, 'parameter', v) in tmpl:
para, s = tmpl[(k, 'parameter', v)], tmpl[(k, 'phrase', v)]
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(v))
else:
s = attrs[-1]
return s
else:
k = 'ab' + str(abs(it) - 1)
if (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
s = s.replace('@(' + para[0] + ')', 'X')
else:
s = 'exception ab' + str(abs(it) - 1)
return s
def _f3(rule, indent=0):
head = '\t' * indent + _f1(rule[0]) + ' is True, if \n'
body = ''
for i in list(rule[1]):
body = body + '\t' * (indent + 1) + _f1(i) + ' and\n'
tail = ''
for i in list(rule[2]):
for r in rules:
if i == r[0]:
tail = tail + '\t' * (indent + 1) + _f1(i) + ' is False and\n'
_ret = head + body + tail
chars = list(_ret)
_ret = ''.join(chars)
if _ret.endswith('and\n'):
_ret = _ret[:-4] + '\n\n'
return _ret
for _r in rules:
ret.append(_f3(_r))
return ret
def proof_trans(rules, attrs, x, tmpl={}):
ret = []
nr = {'<=': '>', '>': '<=', '==': '!=', '!=': '=='}
def _f0(i, r, v):
k = attrs[i].lower().replace(' ', '_')
if isinstance(v, str):
v = v.lower().replace(' ', '_')
v = 'null' if len(v) == 0 else v
if isinstance(v, str) and (k, 'parameter', v) in tmpl:
para, s = tmpl[(k, 'parameter', v)], tmpl[(k, 'phrase', v)]
elif (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
else:
if r == '==':
s = 'the value of ' + k + ' is \'' + v + '\''
elif r == '!=':
s = 'the value of ' + k + ' is not \'' + v + '\''
else:
if r == '<=':
s = 'the value of ' + k + ' is less equal to ' + str(round(v, 3))
else:
s = 'the value of ' + k + ' is greater than ' + str(round(v, 3))
return s
if len(para) < 2:
s = s.replace('@(' + para[0] + ')', 'X')
if r == '!=':
s = ' not ' + s
return s
else:
if r == '==':
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(v))
return s
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', 'N' + str(i))
if r == '!=':
s = s + ' where N' + str(i) + ' is not ' + v
elif r == '<=':
s = s + ' where N' + str(i) + ' is less equal to ' + str(round(v, 3))
else:
s = s + ' where N' + str(i) + ' is greater than ' + str(round(v, 3))
return s
def _f2(i, r, v):
k = attrs[i].lower().replace(' ', '_')
v = x[i]
if isinstance(v, str):
v = v.lower().replace(' ', '_')
v = 'null' if len(v) == 0 else v
if isinstance(v, str) and (k, 'parameter', v) in tmpl:
para, s = tmpl[(k, 'parameter', v)], tmpl[(k, 'phrase', v)]
elif (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
else:
if isinstance(v, str):
s = 'the value of ' + k + ' is \'' + str(v) + '\''
else:
s = 'the value of ' + k + ' is \'' + str(round(v, 3)) + '\''
return s
if len(para) < 2:
s = s.replace('@(' + para[0] + ')', 'X')
return s
else:
if isinstance(v, str):
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(v))
else:
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(round(v, 3)))
return s
def _f4(it):
prefix = ' DOES HOLD because ' if evaluate(it, x) else ' DOES NOT HOLD because '
if isinstance(it, tuple) and len(it) == 3:
i, r, v = it[0], it[1], it[2]
if i < -1:
i = -i - 2
r = nr[r]
return prefix + _f2(i, r, v)
elif it == -1:
heads = attrs[-1].split('(')
k, paras = heads[0].lower().replace(' ', '_'), heads[1].replace('\'', '').strip(')').split(',')
if len(paras) < 2 and (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
s = s.replace('@(' + para[0] + ')', 'X')
else:
v = paras[1]
v = x[it[0]]
if (k, 'parameter', v) in tmpl:
para, s = tmpl[(k, 'parameter', v)], tmpl[(k, 'phrase', v)]
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(v))
else:
s = attrs[-1]
return prefix + s
else:
k = 'ab' + str(abs(it) - 1)
if (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
s = s.replace('@(' + para[0] + ')', 'X')
else:
s = 'exception ab' + str(abs(it) - 1)
return prefix + s
def _f1(it):
if isinstance(it, tuple) and len(it) == 3:
i, r, v = it[0], it[1], it[2]
if i < -1:
i = -i - 2
r = nr[r]
return '' + _f0(i, r, v)
elif it == -1:
suffix = ' DOES HOLD ' if justify_one(rules, x, it)[0] else ' DOES NOT HOLD '
heads = attrs[-1].split('(')
k, paras = heads[0].lower().replace(' ', '_'), heads[1].replace('\'', '').strip(')').split(',')
if len(paras) < 2 and (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
s = s.replace('@(' + para[0] + ')', 'X')
else:
v = paras[1]
if (k, 'parameter', v) in tmpl:
para, s = tmpl[(k, 'parameter', v)], tmpl[(k, 'phrase', v)]
s = s.replace('@(' + para[0] + ')', 'X').replace('@(' + para[1] + ')', str(v))
else:
s = attrs[-1]
return s + suffix
else:
if it not in [r[0] for r in rules]:
suffix = ''
else:
suffix = ' DOES HOLD ' if justify_one(rules, x, it)[0] else ' DOES NOT HOLD '
k = 'ab' + str(abs(it) - 1)
if (k, 'parameter') in tmpl:
para, s = tmpl[(k, 'parameter')], tmpl[(k, 'phrase')]
s = s.replace('@(' + para[0] + ')', 'X')
else:
s = 'exception ab' + str(abs(it) - 1)
return s + suffix
def _f3(rule, indent=0):
head = '\t' * indent + _f1(rule[0]) + 'because \n'
body = ''
for i in list(rule[1]):
body = body + '\t' * (indent + 1) + _f1(i) + '' + _f4(i) + ' and\n'
tail = ''
for i in list(rule[2]):
for r in rules:
if i == r[0]:
tail = tail + _f3(r, indent + 1)
_ret = head + body + tail
chars = list(_ret)
_ret = ''.join(chars)
if _ret.endswith('and\n'):
_ret = _ret[:-4] + '\n'
return _ret
for _r in rules:
if _r[0] == -1:
ret.append(_f3(_r))
return ret
def translate_rules(model, file=None):
if file:
tmpl = load_template(file)
text = translate(model.frs, model.attrs, tmpl)
else:
text = translate(model.frs, model.attrs)
ret = ''
for t in text:
ret = ret + t
return ret
def translate_proof(model, x, all_flag=False, file=None):
ret = ''
model.asp()
all_pos = justify(model.frs, x, all_flag=all_flag)
k = 1
tmpl = load_template(file) if file else {}
if len(all_pos) == 0:
all_neg = rebut(model.frs, x)
for rs in all_neg:
ret += 'rebuttal ' + str(k) + ':\n'
for r in proof_trans(rs, attrs=model.attrs, x=x, tmpl=tmpl):
ret += r
ret += str(justify_data(rs, x, attrs=model.attrs)) + '\n'
k += 1
else:
for rs in all_pos:
ret += 'answer ' + str(k) + ':\n'
for r in proof_trans(rs, attrs=model.attrs, x=x, tmpl=tmpl):
ret += r
ret += str(justify_data(rs, x, attrs=model.attrs)) + '\n'
k += 1
return ret
def titanic_test():
model, data_train, data_test = titanic()
X_train, Y_train = split_xy(data_train)
X_test, Y_test = split_xy(data_test)
model.fit(X_train, Y_train, ratio=0.5)
Y_test_hat = model.predict(X_test)
model.print_asp()
acc, p, r, f1 = get_scores(Y_test_hat, Y_test)
print('% acc', round(acc, 4), 'p', round(p, 4), 'r', round(r, 4), 'f1', round(f1, 4), '\n')
print(translate_rules(model, file='data/titanic/template.txt'))
# exit()
k = 1
for i in range(len(X_test)):
print('Proof Trees for example number', k, ':')
print(translate_proof(model, X_test[i], file='data/titanic/template.txt'))
k += 1
def main():
# model, data = acute()
# model, data = autism()
model, data = breastw()
# model, data = cars()
# model, data = credit()
# model, data = heart()
# model, data = kidney()
# model, data = krkp()
# model, data = mushroom()
# model, data = sonar()
# model, data = voting()
# model, data = ecoli()
# model, data = ionosphere()
# model, data = wine()
# model, data = adult()
# model, data = credit_card()
# model, data = rain()
# model, data = heloc()
data_train, data_test = split_data(data, ratio=0.8, rand=True)
X_train, Y_train = split_xy(data_train)
X_test, Y_test = split_xy(data_test)
start = timer()
model.fit(X_train, Y_train, ratio=0.6)
end = timer()
model.print_asp()
Y_test_hat = model.predict(X_test)
acc, p, r, f1 = get_scores(Y_test_hat, Y_test)
print('% acc', round(acc, 4), 'p', round(p, 4), 'r', round(r, 4), 'f1', round(f1, 4))
print('% foldr++ costs: ', timedelta(seconds=end - start), '\n')
print(translate_rules(model))
# exit()
k = 1
for i in range(len(X_test)):
print('Proof Trees for example number', k, ':')
print(translate_proof(model, X_test[i]))
k += 1
if __name__ == '__main__':
# titanic_test()
main()
| 15,088 | 35.982843 | 107 | py |
FOLD-R-PP | FOLD-R-PP-main/scasp_utils.py | import random
import subprocess
import tempfile
from foldrpp import *
from datasets import *
def load_data(file, numerics, amount=-1):
    """Read a CSV file into rows plus normalized attribute names.

    file: path to a CSV with a header row.
    numerics: header names whose columns are parsed as float.
    amount: stop after this many lines including the header (-1 = all).
    Returns (rows, attrs); cells that fail float() are kept as strings.

    Fixes over the original: file closed via context manager, the bare
    `except:` narrowed to ValueError, dead locals (attr_idx, k) removed.
    """
    ret, attrs, num_idx = [], [], []
    first = True
    with open(file, 'r') as f:
        for raw in f:
            cells = raw.strip('\n').split(',')
            if first:
                # header: normalized names + indices of numeric columns
                attrs = [c.lower().replace(' ', '_') for c in cells]
                num_idx = [j for j, c in enumerate(cells) if c in numerics]
                first = False
            else:
                row = list(cells)
                for j in num_idx:
                    try:
                        row[j] = float(cells[j])
                    except ValueError:
                        pass  # non-numeric cell in a numeric column: keep as-is
                ret.append(row)
            amount -= 1
            if amount == 0:
                break
    return ret, attrs
def decode_data(data, attrs, seq=0, label_flag=False):
    """Render each data row as a list of ASP facts for s(CASP).

    Each row i becomes ['id(i).', 'attr(i,value).', ...]; ids start at
    *seq*.  The last column is skipped unless label_flag is True (i.e.
    the label was already stripped from the rows).  String values are
    lower-cased with spaces -> underscores and quotes/dots removed;
    empty strings become 'null'.
    """
    ret = []
    width = len(data[0]) if data else 0
    if label_flag:
        width += 1
    for offset, row in enumerate(data):
        rid = seq + offset
        facts = ['id(' + str(rid) + ').']
        for j in range(width - 1):
            name = attrs[j].lower()
            cell = row[j]
            if isinstance(cell, (float, int)):
                facts.append(name + '(' + str(rid) + ',' + str(cell) + ').')
            elif len(cell) > 0:
                cleaned = (str(cell).lower().replace(' ', '_')
                           .replace('\'', '').replace('\"', '').replace('.', ''))
                facts.append(name + '(' + str(rid) + ',\'' + cleaned + '\').')
            else:
                facts.append(name + '(' + str(rid) + ',\'null\').')
        ret.append(facts)
    return ret
def load_data_pred(file, numerics, seq=0, label_flag=False, amount=-1):
    """Load a CSV and immediately convert its rows to per-row ASP fact lists."""
    raw, attrs = load_data(file, numerics=numerics, amount=amount)
    return decode_data(raw, attrs, seq=seq, label_flag=label_flag)
def split_data_pred(X, Y, X_pred, ratio=0.8, rand=True):
    """Split examples into training features/labels and held-out fact lists.

    With rand=True, reservoir sampling over indices picks a uniform random
    training subset of size k = int(n * ratio); otherwise the first k
    examples train.  Returns (X_train, Y_train, X_test_pred), preserving
    original ordering.

    Fix over the original: the membership set is built once instead of
    being reconstructed for every element inside each comprehension
    (which made the split accidentally quadratic).
    """
    n = len(Y)
    k = int(n * ratio)
    train = list(range(k))
    if rand:
        # reservoir sampling: later indices may replace reservoir slots
        for i in range(k, n):
            j = random.randint(0, i)
            if j < k:
                train[j] = i
    chosen = set(train)  # build the membership set once
    X_train = [X[i] for i in range(n) if i in chosen]
    Y_train = [Y[i] for i in range(n) if i in chosen]
    X_test_pred = [X_pred[i] for i in range(n) if i not in chosen]
    return X_train, Y_train, X_test_pred
def load_translation(model, file):
    """Load template lines into model.translation (one entry per line).

    Overwrites any existing translation; only the trailing newline is
    stripped from each line.  Fix over the original: the file handle is
    closed via a context manager.
    """
    model.translation = []
    with open(file, 'r') as f:
        for line in f:
            model.translation.append(line.strip('\n'))
def save_asp_to_file(model, file):
    """Write the model's ASP rules to *file*, one rule per line.

    No-op when the model has no rules (model.asp() returns None).
    Fix over the original: the output file is closed via a context
    manager even if a write fails.
    """
    if model.asp() is None:
        return
    with open(file, 'w') as f:
        for r in model.asp_rules:
            f.write(r + '\n')
def scasp_query(model, x):
    """Ask the external s(CASP) solver to explain the model's decision on *x*.

    Writes the ASP rules, the optional translation template, facts for *x*
    and an explain/1 query into a temp file, then shells out to the `scasp`
    binary (-s1 when the model classifies x positive, -s0 otherwise) and
    returns the solver's textual output.  Returns None when the model has
    no rules.  Side effect: model.seq is incremented per query.
    """
    if model.asp() is None:
        return
    tf = tempfile.NamedTemporaryFile()
    for r in model.asp_rules:
        tf.write((r + '\n').encode())
    if model.translation is not None:
        for t in model.translation:
            tf.write((t + '\n').encode())
    # facts describing the example, numbered with the model's sequence id
    data_pred = decode_data([x], model.attrs, model.seq)
    for preds in data_pred:
        for p in preds:
            tf.write((p + '\n').encode())
    seq = str(model.seq)
    model.seq += 1
    # query polarity mirrors the model's own classification of x
    if model.classify(x):
        extra = 'explain(X):- '
    else:
        extra = 'explain(X):- not '
    extra += model.label.lower().replace(' ', '_') + '(X,\'' + model.pos.lower().replace(' ', '_') + '\').\n'
    tf.write(extra.encode())
    query = '?- explain(' + seq + ').'
    tf.write(query.encode())
    tf.flush()
    if model.classify(x):
        command = 'scasp' + ' -s1 --tree --human --pos ' + tf.name
    else:
        command = 'scasp' + ' -s0 --tree --human --pos ' + tf.name
    res = subprocess.run([command], stdout=subprocess.PIPE, shell=True).stdout.decode('utf-8')
    tf.close()  # NamedTemporaryFile is deleted on close
    return res
def titanic_test():
model, data_train, data_test = titanic()
X_train, Y_train = split_xy(data_train)
X_test, Y_test = split_xy(data_test)
model.fit(X_train, Y_train, ratio=0.5)
model.print_asp()
# save_asp_to_file(model, 'data/titanic/asp.txt')
load_translation(model, 'data/titanic/template.txt')
for i in range(len(X_test)):
print(model.classify(X_test[i]))
res = scasp_query(model, X_test[i])
print(res)
if __name__ == '__main__':
titanic_test()
| 4,496 | 28.392157 | 109 | py |
FOLD-R-PP | FOLD-R-PP-main/algo.py | import math
def evaluate(rule, x):
    """Evaluate *rule* on example *x*; returns 1 (satisfied) or 0.

    A rule is either a bare literal (i, r, v) or a 4-tuple
    (head, default_items, exception_rules, mode) where mode 0 requires
    all default items (conjunction) and mode 1 at least one (disjunction).
    Negated literals are encoded with index i < -1; the real feature
    index is -2 - i.
    """
    def __eval(i, r, v):
        # i < -1 encodes a negated literal: recurse on the real index
        # and flip the truth value with XOR.
        if i < -1:
            return __eval(-2 - i, r, v) ^ 1
        if isinstance(v, str):
            # categorical literal; numeric relations never match a string
            if r == '==':
                return x[i] == v
            elif r == '!=':
                return x[i] != v
            else:
                return False
        elif isinstance(x[i], str):
            # numeric literal but the example holds a string here
            return False
        elif r == '<=':
            return x[i] <= v
        elif r == '>':
            return x[i] > v
        else:
            return False
    def _eval(i):
        # dispatch: plain 3-tuple literal vs nested (exception) rule
        if len(i) == 3:
            return __eval(i[0], i[1], i[2])
        elif len(i) == 4:
            return evaluate(i, x)
    if len(rule) == 0:
        return 0
    if len(rule) == 3:
        return __eval(rule[0], rule[1], rule[2])
    # mode 0: every default item must hold; mode 1: at least one must
    if rule[3] == 0 and not all([_eval(i) for i in rule[1]]):
        return 0
    if rule[3] == 1 and not any([_eval(i) for i in rule[1]]):
        return 0
    # any satisfied exception rule defeats the conclusion
    if len(rule[2]) > 0 and any([_eval(i) for i in rule[2]]):
        return 0
    return 1
def cover(rules, x, y):
    """Return 1 when evaluating *rules* on example *x* yields label *y*."""
    outcome = evaluate(rules, x)
    return 1 if outcome == y else 0
def classify(rules, x):
    """Return 1 if any rule in *rules* fires on example *x*, else 0."""
    for r in rules:
        if evaluate(r, x):
            return 1
    return 0
def predict(rules, X):
    """Classify every example in *X*; returns a list of 0/1 labels."""
    return [classify(rules, x) for x in X]
def gain(tp, fn, tn, fp):
    """Information-gain-style score of a split from its confusion counts.

    Returns -inf when the split is wrong more often than right
    (tp + tn < fp + fn); otherwise sums count/total * log(count/side)
    over the four cells, where 'side' is the predicted-positive or
    predicted-negative total.
    """
    if tp + tn < fp + fn:
        return float('-inf')
    pos_total = float(tp + fp)
    neg_total = float(tn + fn)
    total = float(pos_total + neg_total)
    score = 0
    # accumulate in the fixed order tp, fp, tn, fn; zero cells contribute 0
    for count, side in ((tp, pos_total), (fp, pos_total), (tn, neg_total), (fn, neg_total)):
        if count > 0:
            score += count / total * math.log(count / side)
    return score
def best_ig(X_pos, X_neg, i, used_items=[]):
    """Best literal on feature *i* by information gain.

    Counts positive/negative occurrences of each value of column *i*,
    turning numeric counts into cumulative sums over the sorted values so
    that '<='/'>' splits can be scored in O(1) each.  Categorical values
    are scored with '=='/'!='.  Literals already present in *used_items*
    are skipped.  Returns (score, relation, value).
    """
    pos, neg = {}, {}
    num_vals, cat_vals = set(), set()
    num_pos = num_neg = cat_pos = cat_neg = 0
    for row in X_pos:
        val = row[i]
        if val not in pos:
            pos[val], neg[val] = 0, 0
        pos[val] += 1.0
        if isinstance(val, str):
            cat_vals.add(val)
            cat_pos += 1.0
        else:
            num_vals.add(val)
            num_pos += 1.0
    for row in X_neg:
        val = row[i]
        if val not in neg:
            pos[val], neg[val] = 0, 0
        neg[val] += 1.0
        if isinstance(val, str):
            cat_vals.add(val)
            cat_neg += 1.0
        else:
            num_vals.add(val)
            num_neg += 1.0
    # prefix sums: pos[v]/neg[v] become "count of examples <= v"
    ordered = sorted(num_vals)
    for prev, cur in zip(ordered, ordered[1:]):
        pos[cur] += pos[prev]
        neg[cur] += neg[prev]
    best, value, relation = float('-inf'), float('-inf'), ''
    for x in ordered:
        if (i, '<=', x) in used_items or (i, '>', x) in used_items:
            continue
        ig = gain(pos[x], num_pos - pos[x] + cat_pos, num_neg - neg[x] + cat_neg, neg[x])
        if best < ig:
            best, value, relation = ig, x, '<='
        ig = gain(num_pos - pos[x], pos[x] + cat_pos, neg[x] + cat_neg, num_neg - neg[x])
        if best < ig:
            best, value, relation = ig, x, '>'
    for c in cat_vals:
        if (i, '==', c) in used_items or (i, '!=', c) in used_items:
            continue
        ig = gain(pos[c], cat_pos - pos[c] + num_pos, cat_neg - neg[c] + num_neg, neg[c])
        if best < ig:
            best, value, relation = ig, c, '=='
        ig = gain(cat_pos - pos[c] + num_pos, pos[c], neg[c], cat_neg - neg[c] + num_neg)
        if best < ig:
            best, value, relation = ig, c, '!='
    return best, relation, value
def best_item(X_pos, X_neg, used_items=[]):
    """Scan all features and return the highest-gain literal (i, rel, value).

    Returns (-1, '', '') when both example sets are empty or no feature
    offers a usable literal.
    """
    fallback = -1, '', ''
    if len(X_pos) == 0 and len(X_neg) == 0:
        return fallback
    n = len(X_pos[0]) if len(X_pos) > 0 else len(X_neg[0])
    best_score = float('-inf')
    choice = fallback
    for i in range(n):
        ig, r, v = best_ig(X_pos, X_neg, i, used_items)
        if best_score < ig:
            best_score = ig
            choice = i, r, v
    return choice
def fold(X_pos, X_neg, used_items=[], ratio=0.5):
    """FOLD-R++ main loop: learn rules until all positives are covered.

    Each learned rule removes the positives it covers; the loop stops
    when a rule makes no progress.  Returns the list of learned rules.
    """
    rules = []
    while X_pos:
        rule = learn_rule(X_pos, X_neg, used_items, ratio)
        uncovered = [x for x in X_pos if not cover(rule, x, 1)]
        if len(uncovered) == len(X_pos):
            break  # rule covered nothing new: give up
        X_pos = uncovered
        rules.append(rule)
    return rules
def learn_rule(X_pos, X_neg, used_items=[], ratio=0.5):
    """Learn a single default rule (possibly with exceptions).

    Greedily appends the best literal until either no useful literal
    remains (t[0] == -1) or the surviving negatives are within *ratio*
    of the surviving positives; residual negatives are then explained by
    exception rules learned with a recursive fold().
    Returns a rule tuple (-1, items, exceptions, 0).
    """
    items = []
    while True:
        t = best_item(X_pos, X_neg, used_items + items)
        items.append(t)
        rule = -1, items, [], 0
        # shrink both sets to the examples still covered by the rule so far
        X_pos = [X_pos[i] for i in range(len(X_pos)) if cover(rule, X_pos[i], 1)]
        X_neg = [X_neg[i] for i in range(len(X_neg)) if cover(rule, X_neg[i], 1)]
        if t[0] == -1 or len(X_neg) <= len(X_pos) * ratio:
            if t[0] == -1:
                # the last literal was useless: drop it again
                rule = -1, items[:-1], [], 0
            if len(X_neg) > 0 and t[0] != -1:
                # swap roles: rules covering the residual negatives
                # become exceptions of this rule
                ab = fold(X_neg, X_pos, used_items + items, ratio)
                if len(ab) > 0:
                    rule = rule[0], rule[1], ab, 0
            break
    return rule
def flatten_rules(rules):
    """Flatten nested rules into a flat list with integer heads.

    Top-level rules get head -1; each distinct exception body is assigned
    a fresh negative id starting at -2 (rendered as ab1, ab2, ...) and is
    emitted once — identical exception bodies are deduplicated through
    rule_map.  Returns the top-level rules followed by exception rules.
    """
    ret = []
    abrules = []
    rule_map = dict()
    # counter for fresh exception ids, kept on the function object
    flatten_rules.ab = -2
    def _eval(i):
        # plain literals pass through; nested rules flatten to their id
        if isinstance(i, tuple) and len(i) == 3:
            return i
        elif isinstance(i, tuple):
            return _flatten(i)
    def _flatten(rule, root=False):
        # canonical key: (default items, flattened exception ids)
        t = (tuple(rule[1]), tuple([_eval(i) for i in rule[2]]))
        if t not in rule_map:
            rule_map[t] = -1 if root else flatten_rules.ab
            _ret = rule_map[t]
            if root:
                ret.append((_ret, t[0], t[1]))
            else:
                abrules.append((_ret, t[0], t[1]))
                flatten_rules.ab -= 1
        elif root:
            # body already seen as an exception: still emit the top rule
            ret.append((rule[0], t[0], t[1]))
        return rule_map[t]
    for r in rules:
        _flatten(r, root=True)
    return ret + abrules
def justify_one(frs, x, idx=-1, pos=[], start=0):
    """Find the first rule with head *idx* (scanning from *start*) firing on *x*.

    On success the rule is appended to *pos* (supporting exception rules
    are appended by the recursive calls) and (1, rule_index) is returned;
    otherwise (0, -1).  NOTE(review): *pos* is a mutable default argument;
    callers are expected to pass their own list.
    """
    for j in range(start, len(frs)):
        r = frs[j]
        i, d, ab = r[0], r[1], r[2]
        if i != idx:
            continue
        # every default literal must hold ...
        if not all([evaluate(_j, x) for _j in d]):
            continue
        # ... and no exception rule of this rule may fire
        if len(ab) > 0 and any([justify_one(frs, x, idx=_j, pos=pos)[0] for _j in ab]):
            continue
        pos.append(r)
        return 1, j
    if idx < -1:
        # failed exception lookup: still record the candidate rules
        for r in frs:
            if r[0] == idx:
                pos.append(r)
    return 0, -1
def justify(frs, x, all_flag=False):
    """Collect rule chains that justify classifying *x* as positive.

    Scans the flattened rules in order; each successful justification
    becomes one chain.  With all_flag=False only the first chain is
    returned; otherwise scanning resumes after each hit.
    """
    answers = []
    start = 0
    while start < len(frs):
        chain = []
        found, start = justify_one(frs, x, pos=chain, start=start)
        if not found:
            break
        answers.append(chain)
        if not all_flag:
            break
        start += 1
    return answers
def rebut_one(frs, x, idx=-1, neg=[], start=0):
    """Find the first rule with head *idx* (from *start*) that FAILS on *x*.

    A rule fails when one of its default literals is false or one of its
    exception rules fires; the failing rule is appended to *neg* and
    (0, rule_index) returned.  Returns (1, -1) when every matching rule
    holds.  NOTE(review): *neg* is a mutable default argument; callers
    are expected to pass their own list.
    """
    for j in range(start, len(frs)):
        r = frs[j]
        i, d, ab = r[0], r[1], r[2]
        if i != idx:
            continue
        if not all([evaluate(_j, x) for _j in d]):
            neg.append(r)
            return 0, j
        if len(ab) > 0:
            for _j in ab:
                # an exception that holds defeats this rule
                if justify_one(frs, x, idx=_j, pos=neg)[0]:
                    neg.append(r)
                    return 0, j
        continue
    return 1, -1
def rebut(frs, x, all_flag=True):
    """Collect rule chains showing why *x* is NOT classified positive.

    Mirror image of justify(): each failing top-level rule yields one
    rebuttal chain; with all_flag=True scanning continues past each hit.
    """
    rebuttals = []
    start = 0
    while start < len(frs):
        chain = []
        ok, start = rebut_one(frs, x, neg=chain, start=start)
        if ok:
            break
        rebuttals.append(chain)
        if not all_flag:
            break
        start += 1
    return rebuttals
| 7,241 | 26.225564 | 87 | py |
FOLD-R-PP | FOLD-R-PP-main/utils.py | import random
from algo import evaluate, justify_one
def load_data(file, attrs, label, numerics, pos='', amount=-1):
    """Load a CSV into labeled example rows for FOLD-R++.

    file: CSV path with a header row.
    attrs: header names of the feature columns to keep (mutated: the
        rendered goal head, e.g. "label(X,'pos')", is appended).
    label: header name of the label column.
    numerics: header names parsed as float (falling back to the string).
    pos: label value treated as the positive class.
    amount: stop after this many lines including the header (-1 = all).
    Returns (rows, attrs); each row is the selected features plus a 0/1
    label when the label column exists.

    Fixes over the original: file closed via context manager, bare
    `except:` narrowed to ValueError, dead local removed, and the goal
    head no longer crashes for a non-string *pos* (uses str(pos)).
    """
    attr_idx, num_idx, lab_idx = [], [], -1
    ret, i = [], 0
    head = ''
    with open(file, 'r') as f:
        for raw in f.readlines():
            line = raw.strip('\n').split(',')
            if i == 0:
                # header: locate selected features, numeric columns, label
                attr_idx = [j for j in range(len(line)) if line[j] in attrs]
                num_idx = [j for j in range(len(line)) if line[j] in numerics]
                for j in range(len(line)):
                    if line[j] == label:
                        lab_idx = j
                        head += line[j].lower().replace(' ', '_')
                        head += '(X,'
                        if isinstance(pos, str):
                            head += '\'' + pos.lower().replace(' ', '_') + '\')'
                        else:
                            # numeric positive label: render unquoted
                            head += str(pos).lower().replace(' ', '_') + ')'
            else:
                r = [j for j in range(len(line))]
                for j in range(len(line)):
                    if j in num_idx:
                        try:
                            r[j] = float(line[j])
                        except ValueError:
                            r[j] = line[j]
                    else:
                        r[j] = line[j]
                r = [r[j] for j in attr_idx]
                if lab_idx != -1:
                    r.append(1 if line[lab_idx] == pos else 0)
                ret.append(r)
            i += 1
            amount -= 1
            if amount == 0:
                break
    attrs.append(head)
    return ret, attrs
def split_xy(data):
    """Split rows of [features..., label] into (features, labels)."""
    features = [row[:-1] for row in data]
    labels = [int(row[-1]) for row in data]
    return features, labels
def split_X_by_Y(X, Y):
    """Partition examples *X* into (positives, negatives) by labels *Y*."""
    X_pos = [X[i] for i, y in enumerate(Y) if y]
    X_neg = [X[i] for i, y in enumerate(Y) if not y]
    return X_pos, X_neg
def split_data(data, ratio=0.8, rand=True):
    """Split *data* into (train, test) by *ratio*; shuffles in place first
    when rand is True."""
    if rand:
        random.shuffle(data)
    cut = int(len(data) * ratio)
    return data[:cut], data[cut:]
def get_scores(Y_hat, Y):
    """Compute (accuracy, precision, recall, f1) of predictions vs truth.

    Returns (0, 0, 0, 0) for empty input; precision/recall fall back to 0
    when their denominator would be zero.
    """
    n = len(Y)
    if n == 0:
        return 0, 0, 0, 0
    tp = tn = fp = fn = 0
    for i in range(n):
        actual, predicted = Y[i], Y_hat[i]
        if actual:
            if predicted == actual:
                tp += 1.0
            else:
                fn += 1.0
        else:
            if predicted == actual:
                tn += 1.0
            else:
                fp += 1.0
    if tp < 1:
        p = tp / (tp + fp) if fp >= 1 else 0
        r = tp / (tp + fn) if fn >= 1 else 0
    else:
        p = tp / (tp + fp)
        r = tp / (tp + fn)
    f1 = 2 * r * p / (r + p) if r * p else 0
    return (tp + tn) / n, p, r, f1
def justify_data(frs, x, attrs):
    """Collect the 'attribute: value' pairs of *x* referenced by the rule
    bodies in *frs* (deduplicated into a set)."""
    facts = set()
    for rule in frs:
        for item in rule[1]:
            idx = item[0]
            facts.add(attrs[idx] + ': ' + str(x[idx]))
    return facts
def decode_rules(rules, attrs, x=None):
    """Render flattened rules as ASP rule strings.

    When *x* is given, every literal is prefixed with [T]/[F] showing
    whether it holds for that example ([U] for exception heads with no
    defining rule).  '<=' is printed as '=<' per Prolog convention.
    """
    ret = []
    # relation map used to decode negated literals (index < -1)
    nr = {'<=': '>', '>': '<=', '==': '!=', '!=': '=='}
    def _f1(it):
        # render a single literal or rule head
        prefix, not_prefix = '', ''
        if isinstance(it, tuple) and len(it) == 3:
            if x is not None:
                prefix = '[T]' if evaluate(it, x) else '[F]'
                not_prefix = '[T]' if prefix == '[F]' else '[F]'
            i, r, v = it[0], it[1], it[2]
            if i < -1:
                # negated literal: recover real index, flip the relation
                i = -2 - i
                r = nr[r]
            k = attrs[i].lower().replace(' ', '_')
            if isinstance(v, str):
                v = v.lower().replace(' ', '_')
                v = 'null' if len(v) == 0 else '\'' + v + '\''
                if r == '==':
                    return prefix + k + '(X,' + v + ')'
                elif r == '!=':
                    return 'not ' + not_prefix + k + '(X,' + v + ')'
            else:
                # numeric literal: bind a fresh variable Ni and compare it
                return prefix + k + '(X,' + 'N' + str(i) + ')' + ', N' + str(i) + r + str(round(v, 3))
        elif it == -1:
            # goal predicate (stored as the last attribute name)
            if x is not None:
                prefix = '[T]' if justify_one(rules, x, it)[0] else '[F]'
            return prefix + attrs[-1]
        else:
            # abnormal/exception head abN(X)
            if x is not None:
                if it not in [r[0] for r in rules]:
                    prefix = '[U]'
                else:
                    prefix = '[T]' if justify_one(rules, x, it)[0] else '[F]'
            return prefix + 'ab' + str(abs(it) - 1) + '(X)'
    def _f2(rule):
        # render one full rule: "head :- defaults, not exceptions."
        head = _f1(rule[0])
        body = ''
        for i in list(rule[1]):
            body = body + _f1(i) + ', '
        tail = ''
        for i in list(rule[2]):
            t = _f1(i)
            if 'not' not in t:
                tail = tail + 'not ' + _f1(i) + ', '
            else:
                t = t.replace('not ', '')
                tail = tail + t + ', '
        _ret = head + ' :- ' + body + tail
        chars = list(_ret)
        # turn the trailing ', ' into the terminating period
        chars[-2] = '.'
        _ret = ''.join(chars)
        _ret = _ret.replace('<=', '=<')
        return _ret
    for _r in rules:
        ret.append(_f2(_r))
    return ret
def zip_rule(rule):
    """Merge redundant numeric comparisons in a rule body.

    Numeric items on the same feature collapse into at most one lower
    ('>') and one upper ('<=') bound; categorical items pass through
    unchanged.  Returns (head, merged_items, exceptions, 0).
    """
    bounds, merged = {}, []
    inf = float('inf')
    for item in rule[1]:
        idx, rel, val = item[0], item[1], item[2]
        if isinstance(val, str):
            merged.append(item)  # categorical literal: keep as-is
        elif rel == '<=':
            bounds.setdefault(idx, []).append([-inf, val])
        else:
            bounds.setdefault(idx, []).append([val, inf])
    for idx in sorted(bounds):
        upper, lower = inf, -inf
        for lo, hi in bounds[idx]:
            if lo == -inf:
                upper = min(upper, hi)  # tightest '<=' bound
            else:
                lower = max(lower, lo)  # tightest '>' bound
        if upper == inf:
            merged.append((idx, '>', lower))
        elif lower == -inf:
            merged.append((idx, '<=', upper))
        else:
            merged.append((idx, '>', lower))
            merged.append((idx, '<=', upper))
    return rule[0], merged, rule[2], 0
def simplify_rule(rule):
    """Drop duplicate goals from the body of an ASP rule string,
    preserving their first-occurrence order."""
    head, _, body = rule.partition(' :- ')
    unique = ', '.join(dict.fromkeys(body.split(', ')))
    return head + ' :- ' + unique
def proof_tree(rules, attrs, x):
    """Build human-readable proof trees showing how *rules* apply to *x*.

    Returns one multi-line string per top-level (head == -1) rule; every
    literal is spelled out in English with the example's actual value and
    whether the condition holds.
    """
    ret = []
    nr = {'<=': '>', '>': '<=', '==': '!=', '!=': '=='}
    def _f1(it):
        # English rendering of one literal or rule head
        if isinstance(it, tuple) and len(it) == 3:
            suffix = ' (DOES HOLD) ' if evaluate(it, x) else ' (DOES NOT HOLD) '
            i, r, v = it[0], it[1], it[2]
            if i < -1:
                # negated literal: recover real index, flip the relation
                i = -2 - i
                r = nr[r]
            k = attrs[i].lower().replace(' ', '_')
            if isinstance(v, str):
                v = v.lower().replace(' ', '_')
                v = 'null' if len(v) == 0 else '\'' + v + '\''
            xi = x[i]
            if isinstance(xi, str):
                xi = xi.lower().replace(' ', '_')
                xi = '\'null\'' if len(xi) == 0 else xi
            if r == '==':
                return 'the value of ' + k + ' is \'' + str(xi) + '\' which should equal ' + v + suffix
            elif r == '!=':
                return 'the value of ' + k + ' is \'' + str(xi) + '\' which should not equal ' + v + suffix
            else:
                if r == '<=':
                    return 'the value of ' + k + ' is ' + str(xi) + ' which should be less equal to ' + str(round(v, 3)) + suffix
                else:
                    return 'the value of ' + k + ' is ' + str(xi) + ' which should be greater than ' + str(round(v, 3)) + suffix
        elif it == -1:
            suffix = ' DOES HOLD ' if justify_one(rules, x, it)[0] else ' DOES NOT HOLD '
            return attrs[-1] + suffix
        else:
            if it not in [r[0] for r in rules]:
                suffix = ''
            else:
                suffix = ' DOES HOLD ' if justify_one(rules, x, it)[0] else ' DOES NOT HOLD '
            return 'exception ab' + str(abs(it) - 1) + suffix
    def _f2(rule, indent=0):
        # recursively render a rule and the sub-trees of its exceptions
        head = '\t' * indent + _f1(rule[0]) + 'because \n'
        body = ''
        for i in list(rule[1]):
            body = body + '\t' * (indent + 1) + _f1(i) + '\n'
        tail = ''
        for i in list(rule[2]):
            for r in rules:
                if i == r[0]:
                    tail = tail + _f2(r, indent + 1)
        _ret = head + body + tail
        chars = list(_ret)
        _ret = ''.join(chars)
        return _ret
    for _r in rules:
        if _r[0] == -1:
            ret.append(_f2(_r))
    return ret
def num_predicates(rules):
    """Count predicates over all rules: body literals plus exception refs."""
    return sum(len(rule[1]) + len(rule[2]) for rule in rules)
| 8,536 | 30.973783 | 129 | py |
FOLD-R-PP | FOLD-R-PP-main/foldrpp.py | from utils import load_data, split_xy, split_X_by_Y, \
split_data, get_scores, justify_data, decode_rules, proof_tree, zip_rule, simplify_rule
from algo import fold, predict, classify, flatten_rules, justify, rebut
import pickle
class Classifier:
    """FOLD-R++ classifier wrapper.

    Bundles data loading, rule learning (via `fold`), prediction, and the
    generation of human-readable ASP rules, justifications, and proof trees.
    All heavy lifting is delegated to functions imported from `utils`/`algo`.
    """
    def __init__(self, attrs=None, numeric=None, label=None, pos=None):
        # attrs: feature column names of the dataset
        self.attrs = attrs
        # numeric: subset of attrs treated as numeric features
        self.numeric = numeric
        # label: name of the target column
        self.label = label
        # pos: label value regarded as the positive class
        self.pos = pos
        # rules: nested rule structure produced by fold(); None until fit()
        self.rules = None
        # frs: flattened/zipped rules, computed lazily by asp()
        self.frs = None
        # asp_rules: cached decoded ASP rule strings
        self.asp_rules = None
        self.seq = 1
        # simple: the `simple` flag the cached asp_rules were built with
        self.simple = None
        self.translation = None
    def load_data(self, file, amount=-1):
        """Load a CSV file; updates self.attrs and returns the encoded rows.

        amount=-1 loads all rows (semantics delegated to utils.load_data).
        """
        data, self.attrs = load_data(file, self.attrs, self.label, self.numeric, self.pos, amount)
        return data
    def fit(self, X, Y, ratio=0.5):
        """Learn rules from features X and labels Y with the FOLD algorithm."""
        X_pos, X_neg = split_X_by_Y(X, Y)
        self.rules = fold(X_pos, X_neg, ratio=ratio)
    def predict(self, X):
        """Return the predicted label for every row in X."""
        return predict(self.rules, X)
    def classify(self, x):
        """Classify a single row x."""
        return classify(self.rules, x)
    def asp(self, simple=False):
        """Return the learned rules as ASP text (cached).

        Recomputes when no cache exists yet or when `simple` differs from
        the flag used for the cached version.
        """
        if (self.asp_rules is None and self.rules is not None) or self.simple != simple:
            self.simple = simple
            self.frs = flatten_rules(self.rules)
            self.frs = [zip_rule(r) for r in self.frs]
            self.asp_rules = decode_rules(self.frs, self.attrs)
            if simple:
                # drop duplicated literals in each rule body
                self.asp_rules = [simplify_rule(r) for r in self.asp_rules]
        return self.asp_rules
    def print_asp(self, simple=False):
        """Print the ASP program, one rule per line."""
        for r in self.asp(simple):
            print(r)
    def explain(self, x, all_flag=False):
        """Return textual justification(s) for the prediction on row x.

        When at least one rule fires, each firing rule set is printed as an
        'answer'; otherwise the failing rule sets are printed as 'rebuttals'.
        all_flag=True lists all justifications instead of the first.
        """
        ret = ''
        self.asp()
        all_pos = justify(self.frs, x, all_flag=all_flag)
        k = 1
        if len(all_pos) == 0:
            all_neg = rebut(self.frs, x)
            for rs in all_neg:
                ret += 'rebuttal ' + str(k) + ':\n'
                for r in decode_rules(rs, attrs=self.attrs, x=x):
                    ret += r + '\n'
                ret += str(justify_data(rs, x, attrs=self.attrs)) + '\n'
                k += 1
        else:
            for rs in all_pos:
                ret += 'answer ' + str(k) + ':\n'
                for r in decode_rules(rs, attrs=self.attrs, x=x):
                    ret += r + '\n'
                ret += str(justify_data(rs, x, attrs=self.attrs)) + '\n'
                k += 1
        return ret
    def proof(self, x, all_flag=False):
        """Like explain(), but renders each justification as a proof tree."""
        ret = ''
        self.asp()
        all_pos = justify(self.frs, x, all_flag=all_flag)
        k = 1
        if len(all_pos) == 0:
            all_neg = rebut(self.frs, x)
            for rs in all_neg:
                ret += 'rebuttal ' + str(k) + ':\n'
                for r in proof_tree(rs, attrs=self.attrs, x=x):
                    ret += r
                ret += str(justify_data(rs, x, attrs=self.attrs)) + '\n'
                k += 1
        else:
            for rs in all_pos:
                ret += 'answer ' + str(k) + ':\n'
                for r in proof_tree(rs, attrs=self.attrs, x=x):
                    ret += r
                ret += str(justify_data(rs, x, attrs=self.attrs)) + '\n'
                k += 1
        return ret
def save_model_to_file(model, file):
f = open(file, 'wb')
pickle.dump(model, f)
f.close()
def load_model_from_file(file):
f = open(file, 'rb')
ret = pickle.load(f)
f.close()
return ret
| 3,395 | 31.653846 | 98 | py |
FOLD-R-PP | FOLD-R-PP-main/datasets.py | from foldrpp import Classifier
def acute():
    """Build a Classifier for the acute dataset and load its rows."""
    attrs = ['a1', 'a2', 'a3', 'a4', 'a5', 'a6']
    model = Classifier(attrs=attrs, numeric=['a1'], label='label', pos='yes')
    data = model.load_data('data/acute/acute.csv')
    print(f'\n% acute dataset {len(data)} {len(data[0])}')
    return model, data
def adult():
    """Build a Classifier for the adult (census income) dataset and load it."""
    attrs = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
             'marital_status', 'occupation', 'relationship', 'race', 'sex',
             'capital_gain', 'capital_loss', 'hours_per_week', 'native_country']
    nums = ['age', 'fnlwgt', 'education_num', 'capital_gain', 'capital_loss',
            'hours_per_week']
    model = Classifier(attrs=attrs, numeric=nums, label='label', pos='<=50K')
    data = model.load_data('data/adult/adult.csv')
    print(f'\n% adult dataset {len(data)} {len(data[0])}')
    return model, data
def autism():
    """Build a Classifier for the autism screening dataset and load it."""
    attrs = [f'a{i}' for i in range(1, 11)] + ['age', 'gender', 'ethnicity',
             'jaundice', 'pdd', 'used_app_before', 'relation']
    model = Classifier(attrs=attrs, numeric=['age'], label='label', pos='NO')
    data = model.load_data('data/autism/autism.csv')
    print(f'\n% autism dataset {len(data)} {len(data[0])}')
    return model, data
def breastw():
    """Build a Classifier for the Wisconsin breast cancer dataset and load it."""
    attrs = ['clump_thickness', 'cell_size_uniformity', 'cell_shape_uniformity',
             'marginal_adhesion', 'single_epi_cell_size', 'bare_nuclei',
             'bland_chromatin', 'normal_nucleoli', 'mitoses']
    # every column is numeric
    model = Classifier(attrs=attrs, numeric=attrs, label='label', pos='benign')
    data = model.load_data('data/breastw/breastw.csv')
    print(f'\n% breastw dataset {len(data)} {len(data[0])}')
    return model, data
def cars():
    """Build a Classifier for the car evaluation dataset and load it."""
    attrs = ['buying', 'maint', 'doors', 'persons', 'lugboot', 'safety']
    # all attributes are categorical
    model = Classifier(attrs=attrs, numeric=[], label='label', pos='negative')
    data = model.load_data('data/cars/cars.csv')
    print(f'\n% cars dataset {len(data)} {len(data[0])}')
    return model, data
def credit():
    """Build a Classifier for the credit approval dataset and load it."""
    attrs = [f'a{i}' for i in range(1, 16)]
    nums = ['a2', 'a3', 'a8', 'a11', 'a14', 'a15']
    model = Classifier(attrs=attrs, numeric=nums, label='label', pos='-')
    data = model.load_data('data/credit/credit.csv')
    print(f'\n% credit dataset {len(data)} {len(data[0])}')
    return model, data
def heart():
    """Build a Classifier for the heart disease dataset and load it."""
    attrs = ['age', 'sex', 'chest_pain', 'blood_pressure', 'serum_cholestoral',
             'fasting_blood_sugar', 'resting_electrocardiographic_results',
             'maximum_heart_rate_achieved', 'exercise_induced_angina',
             'oldpeak', 'slope', 'major_vessels', 'thal']
    nums = ['age', 'blood_pressure', 'serum_cholestoral',
            'maximum_heart_rate_achieved', 'oldpeak']
    model = Classifier(attrs=attrs, numeric=nums, label='label', pos='absent')
    data = model.load_data('data/heart/heart.csv')
    print(f'\n% heart dataset {len(data)} {len(data[0])}')
    return model, data
def kidney():
    """Build a Classifier for the chronic kidney disease dataset and load it."""
    attrs = ['age', 'bp', 'sg', 'al', 'su', 'rbc', 'pc', 'pcc', 'ba', 'bgr',
             'bu', 'sc', 'sod', 'pot', 'hemo', 'pcv', 'wbcc', 'rbcc', 'htn',
             'dm', 'cad', 'appet', 'pe', 'ane']
    nums = ['age', 'bp', 'sg', 'bgr', 'bu', 'sc', 'sod', 'pot', 'hemo',
            'pcv', 'wbcc', 'rbcc']
    model = Classifier(attrs=attrs, numeric=nums, label='label', pos='ckd')
    data = model.load_data('data/kidney/kidney.csv')
    print(f'\n% kidney dataset {len(data)} {len(data[0])}')
    return model, data
def krkp():
    """Build a Classifier for the king-rook vs king-pawn dataset and load it."""
    attrs = [f'a{i}' for i in range(1, 37)]
    # all 36 board attributes are categorical
    model = Classifier(attrs=attrs, numeric=[], label='label', pos='won')
    data = model.load_data('data/krkp/krkp.csv')
    print(f'\n% krkp dataset {len(data)} {len(data[0])}')
    return model, data
def mushroom():
    """Build a Classifier for the mushroom edibility dataset and load it."""
    attrs = ['cap_shape', 'cap_surface', 'cap_color', 'bruises', 'odor',
             'gill_attachment', 'gill_spacing', 'gill_size', 'gill_color',
             'stalk_shape', 'stalk_root', 'stalk_surface_above_ring',
             'stalk_surface_below_ring', 'stalk_color_above_ring',
             'stalk_color_below_ring', 'veil_type', 'veil_color',
             'ring_number', 'ring_type', 'spore_print_color', 'population',
             'habitat']
    # all attributes are categorical
    model = Classifier(attrs=attrs, numeric=[], label='label', pos='p')
    data = model.load_data('data/mushroom/mushroom.csv')
    print(f'\n% mushroom dataset {len(data)} {len(data[0])}')
    return model, data
def sonar():
    """Build a Classifier for the sonar (mines vs rocks) dataset and load it."""
    attrs = [f'a{i}' for i in range(1, 61)]
    # all 60 signal attributes are numeric
    model = Classifier(attrs=attrs, numeric=attrs, label='label', pos='Mine')
    data = model.load_data('data/sonar/sonar.csv')
    print(f'\n% sonar dataset {len(data)} {len(data[0])}')
    return model, data
def voting():
    """Build a Classifier for the congressional voting records dataset."""
    attrs = ['handicapped_infants', 'water_project_cost_sharing',
             'budget_resolution', 'physician_fee_freeze', 'el_salvador_aid',
             'religious_groups_in_schools', 'anti_satellite_test_ban',
             'aid_to_nicaraguan_contras', 'mx_missile', 'immigration',
             'synfuels_corporation_cutback', 'education_spending',
             'superfund_right_to_sue', 'crime', 'duty_free_exports',
             'export_administration_act_south_africa']
    # all vote attributes are categorical
    model = Classifier(attrs=attrs, numeric=[], label='label', pos='republican')
    data = model.load_data('data/voting/voting.csv')
    print(f'\n% voting dataset {len(data)} {len(data[0])}')
    return model, data
def ecoli():
    """Build a Classifier for the ecoli protein localization dataset."""
    attrs = ['sn', 'mcg', 'gvh', 'lip', 'chg', 'aac', 'alm1', 'alm2']
    # every column but the sequence name 'sn' is numeric
    model = Classifier(attrs=attrs, numeric=attrs[1:], label='label', pos='cp')
    data = model.load_data('data/ecoli/ecoli.csv')
    print(f'\n% ecoli dataset {len(data)} {len(data[0])}')
    return model, data
def ionosphere():
    """Build a Classifier for the ionosphere radar dataset and load it."""
    attrs = [f'c{i}' for i in range(1, 35)]
    # all 34 radar attributes are numeric
    model = Classifier(attrs=attrs, numeric=attrs, label='label', pos='g')
    data = model.load_data('data/ionosphere/ionosphere.csv')
    print(f'\n% ionosphere dataset {len(data)} {len(data[0])}')
    return model, data
def wine():
    """Build a Classifier for the wine cultivar dataset and load it."""
    attrs = ['alcohol', 'malic_acid', 'ash', 'alcalinity_of_ash', 'magnesium',
             'tot_phenols', 'flavanoids', 'nonflavanoid_phenols',
             'proanthocyanins', 'color_intensity', 'hue', 'OD_of_diluted',
             'proline']
    # every attribute is numeric
    model = Classifier(attrs=attrs, numeric=attrs, label='label', pos='3')
    data = model.load_data('data/wine/wine.csv')
    print(f'\n% wine dataset {len(data)} {len(data[0])}')
    return model, data
def credit_card():
    """Build a Classifier for the credit card default dataset and load it."""
    bills = [f'BILL_AMT{i}' for i in range(1, 7)]
    pays = [f'PAY_AMT{i}' for i in range(1, 7)]
    attrs = ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE', 'PAY_0',
             'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6'] + bills + pays
    nums = ['LIMIT_BAL', 'AGE'] + bills + pays
    model = Classifier(attrs=attrs, numeric=nums, label='DEFAULT_PAYMENT', pos='0')
    data = model.load_data('data/credit_card/credit_card.csv')
    print(f'\n% credit card dataset {len(data)} {len(data[0])}')
    return model, data
def rain():
    """Build a Classifier for the Australian rain prediction dataset."""
    attrs = ['Month', 'Day', 'Location', 'MinTemp', 'MaxTemp', 'Rainfall',
             'Evaporation', 'Sunshine', 'WindGustDir', 'WindGustSpeed',
             'WindDir9am', 'WindDir3pm', 'WindSpeed9am', 'WindSpeed3pm',
             'Humidity9am', 'Humidity3pm', 'Pressure9am', 'Pressure3pm',
             'Cloud9am', 'Cloud3pm', 'Temp9am', 'Temp3pm', 'RainToday']
    nums = ['Month', 'Day', 'MinTemp', 'MaxTemp', 'Rainfall', 'WindDir9am',
            'WindDir3pm', 'WindSpeed9am', 'WindSpeed3pm', 'Humidity9am',
            'Humidity3pm', 'Pressure9am', 'Pressure3pm', 'Temp9am', 'Temp3pm']
    model = Classifier(attrs=attrs, numeric=nums, label='RainTomorrow', pos='No')
    data = model.load_data('data/rain/rain.csv')
    print(f'\n% rain dataset {len(data)} {len(data[0])}')
    return model, data
def heloc():
    """Build a Classifier for the FICO HELOC credit risk dataset and load it."""
    attrs = ['ExternalRiskEstimate', 'MSinceOldestTradeOpen',
             'MSinceMostRecentTradeOpen', 'AverageMInFile',
             'NumSatisfactoryTrades', 'NumTrades60Ever2DerogPubRec',
             'NumTrades90Ever2DerogPubRec', 'PercentTradesNeverDelq',
             'MSinceMostRecentDelq', 'MaxDelq2PublicRecLast12M', 'MaxDelqEver',
             'NumTotalTrades', 'NumTradesOpeninLast12M',
             'PercentInstallTrades', 'MSinceMostRecentInqexcl7days',
             'NumInqLast6M', 'NumInqLast6Mexcl7days',
             'NetFractionRevolvingBurden', 'NetFractionInstallBurden',
             'NumRevolvingTradesWBalance', 'NumInstallTradesWBalance',
             'NumBank2NatlTradesWHighUtilization', 'PercentTradesWBalance']
    # every attribute is numeric
    model = Classifier(attrs=attrs, numeric=attrs, label='RiskPerformance', pos='Good')
    data = model.load_data('data/heloc/heloc_dataset_v1.csv')
    print(f'\n% heloc dataset {len(data)} {len(data[0])}')
    return model, data
def titanic():
    """Build a Classifier for the titanic dataset; load both train and test."""
    attrs = ['Sex', 'Age', 'Number_of_Siblings_Spouses',
             'Number_Of_Parents_Children', 'Fare', 'Class', 'Embarked']
    nums = ['Age', 'Number_of_Siblings_Spouses', 'Number_Of_Parents_Children',
            'Fare']
    model = Classifier(attrs=attrs, numeric=nums, label='Survived', pos='0')
    data_train = model.load_data('data/titanic/train.csv')
    data_test = model.load_data('data/titanic/test.csv')
    print(f'\n% titanic train dataset {len(data_train)} {len(data_train[0])}')
    print(f'% titanic test dataset {len(data_test)} {len(data_test[0])}')
    return model, data_train, data_test
def anneal():
    """Build a Classifier for the annealing dataset; load both train and test."""
    attrs = ['family', 'product_type', 'steel', 'carbon', 'hardness',
             'temper_rolling', 'condition', 'formability', 'strength',
             'non_ageing', 'surface_finish', 'surface_quality',
             'enamelability', 'bc', 'bf', 'bt', 'bw_me', 'bl', 'm', 'chrom',
             'phos', 'cbond', 'marvi', 'exptl', 'ferro', 'corr',
             'blue_bright_varn_clean', 'lustre', 'jurofm', 's', 'p', 'shape',
             'thick', 'width', 'len', 'oil', 'bore', 'packing']
    nums = ['thick', 'width', 'len']
    model = Classifier(attrs=attrs, numeric=nums, label='classes', pos='3')
    data_train = model.load_data('data/anneal/train.csv')
    data_test = model.load_data('data/anneal/test.csv')
    print(f'\n% anneal train dataset {len(data_train)} {len(data_train[0])}')
    print(f'% anneal test dataset {len(data_test)} {len(data_test[0])}')
    return model, data_train, data_test
def parkison():
attrs = ['gender','PPE','DFA','RPDE','numPulses','numPeriodsPulses','meanPeriodPulses','stdDevPeriodPulses','locPctJitter','locAbsJitter','rapJitter','ppq5Jitter','ddpJitter','locShimmer','locDbShimmer','apq3Shimmer','apq5Shimmer','apq11Shimmer','ddaShimmer','meanAutoCorrHarmonicity','meanNoiseToHarmHarmonicity','meanHarmToNoiseHarmonicity','minIntensity','maxIntensity','meanIntensity','f1','f2','f3','f4','b1','b2','b3','b4','GQ_prc5_95','GQ_std_cycle_open','GQ_std_cycle_closed','GNE_mean','GNE_std','GNE_SNR_TKEO','GNE_SNR_SEO','GNE_NSR_TKEO','GNE_NSR_SEO','VFER_mean','VFER_std','VFER_entropy','VFER_SNR_TKEO','VFER_SNR_SEO','VFER_NSR_TKEO','VFER_NSR_SEO','IMF_SNR_SEO','IMF_SNR_TKEO','IMF_SNR_entropy','IMF_NSR_SEO','IMF_NSR_TKEO','IMF_NSR_entropy','mean_Log_energy','mean_MFCC_0th_coef','mean_MFCC_1st_coef','mean_MFCC_2nd_coef','mean_MFCC_3rd_coef','mean_MFCC_4th_coef','mean_MFCC_5th_coef','mean_MFCC_6th_coef','mean_MFCC_7th_coef','mean_MFCC_8th_coef','mean_MFCC_9th_coef','mean_MFCC_10th_coef','mean_MFCC_11th_coef','mean_MFCC_12th_coef','mean_delta_log_energy','mean_0th_delta','mean_1st_delta','mean_2nd_delta','mean_3rd_delta','mean_4th_delta','mean_5th_delta','mean_6th_delta','mean_7th_delta','mean_8th_delta','mean_9th_delta','mean_10th_delta','mean_11th_delta','mean_12th_delta','mean_delta_delta_log_energy','mean_delta_delta_0th','mean_1st_delta_delta','mean_2nd_delta_delta','mean_3rd_delta_delta','mean_4th_delta_delta','mean_5th_delta_delta','mean_6th_delta_delta','mean_7th_delta_delta','mean_8th_delta_delta','mean_9th_delta_delta','mean_10th_delta_delta','mean_11th_delta_delta','mean_12th_delta_delta','std_Log_energy','std_MFCC_0th_coef','std_MFCC_1st_coef','std_MFCC_2nd_coef','std_MFCC_3rd_coef','std_MFCC_4th_coef','std_MFCC_5th_coef','std_MFCC_6th_coef','std_MFCC_7th_coef','std_MFCC_8th_coef','std_MFCC_9th_coef','std_MFCC_10th_coef','std_MFCC_11th_coef','std_MFCC_12th_coef','std_delta_log_energy','std_0th_delta','std_1st_delta','std_2nd_delta','std_3rd_delta','s
td_4th_delta','std_5th_delta','std_6th_delta','std_7th_delta','std_8th_delta','std_9th_delta','std_10th_delta','std_11th_delta','std_12th_delta','std_delta_delta_log_energy','std_delta_delta_0th','std_1st_delta_delta','std_2nd_delta_delta','std_3rd_delta_delta','std_4th_delta_delta','std_5th_delta_delta','std_6th_delta_delta','std_7th_delta_delta','std_8th_delta_delta','std_9th_delta_delta','std_10th_delta_delta','std_11th_delta_delta','std_12th_delta_delta','Ea','Ed_1_coef','Ed_2_coef','Ed_3_coef','Ed_4_coef','Ed_5_coef','Ed_6_coef','Ed_7_coef','Ed_8_coef','Ed_9_coef','Ed_10_coef','det_entropy_shannon_1_coef','det_entropy_shannon_2_coef','det_entropy_shannon_3_coef','det_entropy_shannon_4_coef','det_entropy_shannon_5_coef','det_entropy_shannon_6_coef','det_entropy_shannon_7_coef','det_entropy_shannon_8_coef','det_entropy_shannon_9_coef','det_entropy_shannon_10_coef','det_entropy_log_1_coef','det_entropy_log_2_coef','det_entropy_log_3_coef','det_entropy_log_4_coef','det_entropy_log_5_coef','det_entropy_log_6_coef','det_entropy_log_7_coef','det_entropy_log_8_coef','det_entropy_log_9_coef','det_entropy_log_10_coef','det_TKEO_mean_1_coef','det_TKEO_mean_2_coef','det_TKEO_mean_3_coef','det_TKEO_mean_4_coef','det_TKEO_mean_5_coef','det_TKEO_mean_6_coef','det_TKEO_mean_7_coef','det_TKEO_mean_8_coef','det_TKEO_mean_9_coef','det_TKEO_mean_10_coef','det_TKEO_std_1_coef','det_TKEO_std_2_coef','det_TKEO_std_3_coef','det_TKEO_std_4_coef','det_TKEO_std_5_coef','det_TKEO_std_6_coef','det_TKEO_std_7_coef','det_TKEO_std_8_coef','det_TKEO_std_9_coef','det_TKEO_std_10_coef','app_entropy_shannon_1_coef','app_entropy_shannon_2_coef','app_entropy_shannon_3_coef','app_entropy_shannon_4_coef','app_entropy_shannon_5_coef','app_entropy_shannon_6_coef','app_entropy_shannon_7_coef','app_entropy_shannon_8_coef','app_entropy_shannon_9_coef','app_entropy_shannon_10_coef','app_entropy_log_1_coef','app_entropy_log_2_coef','app_entropy_log_3_coef','app_entropy_log_4_coef','app_entropy_log_5_coef','
app_entropy_log_6_coef','app_entropy_log_7_coef','app_entropy_log_8_coef','app_entropy_log_9_coef','app_entropy_log_10_coef','app_det_TKEO_mean_1_coef','app_det_TKEO_mean_2_coef','app_det_TKEO_mean_3_coef','app_det_TKEO_mean_4_coef','app_det_TKEO_mean_5_coef','app_det_TKEO_mean_6_coef','app_det_TKEO_mean_7_coef','app_det_TKEO_mean_8_coef','app_det_TKEO_mean_9_coef','app_det_TKEO_mean_10_coef','app_TKEO_std_1_coef','app_TKEO_std_2_coef','app_TKEO_std_3_coef','app_TKEO_std_4_coef','app_TKEO_std_5_coef','app_TKEO_std_6_coef','app_TKEO_std_7_coef','app_TKEO_std_8_coef','app_TKEO_std_9_coef','app_TKEO_std_10_coef','Ea2','Ed2_1_coef','Ed2_2_coef','Ed2_3_coef','Ed2_4_coef','Ed2_5_coef','Ed2_6_coef','Ed2_7_coef','Ed2_8_coef','Ed2_9_coef','Ed2_10_coef','det_LT_entropy_shannon_1_coef','det_LT_entropy_shannon_2_coef','det_LT_entropy_shannon_3_coef','det_LT_entropy_shannon_4_coef','det_LT_entropy_shannon_5_coef','det_LT_entropy_shannon_6_coef','det_LT_entropy_shannon_7_coef','det_LT_entropy_shannon_8_coef','det_LT_entropy_shannon_9_coef','det_LT_entropy_shannon_10_coef','det_LT_entropy_log_1_coef','det_LT_entropy_log_2_coef','det_LT_entropy_log_3_coef','det_LT_entropy_log_4_coef','det_LT_entropy_log_5_coef','det_LT_entropy_log_6_coef','det_LT_entropy_log_7_coef','det_LT_entropy_log_8_coef','det_LT_entropy_log_9_coef','det_LT_entropy_log_10_coef','det_LT_TKEO_mean_1_coef','det_LT_TKEO_mean_2_coef','det_LT_TKEO_mean_3_coef','det_LT_TKEO_mean_4_coef','det_LT_TKEO_mean_5_coef','det_LT_TKEO_mean_6_coef','det_LT_TKEO_mean_7_coef','det_LT_TKEO_mean_8_coef','det_LT_TKEO_mean_9_coef','det_LT_TKEO_mean_10_coef','det_LT_TKEO_std_1_coef','det_LT_TKEO_std_2_coef','det_LT_TKEO_std_3_coef','det_LT_TKEO_std_4_coef','det_LT_TKEO_std_5_coef','det_LT_TKEO_std_6_coef','det_LT_TKEO_std_7_coef','det_LT_TKEO_std_8_coef','det_LT_TKEO_std_9_coef','det_LT_TKEO_std_10_coef','app_LT_entropy_shannon_1_coef','app_LT_entropy_shannon_2_coef','app_LT_entropy_shannon_3_coef','app_LT_entropy_shannon_4_coef','app
_LT_entropy_shannon_5_coef','app_LT_entropy_shannon_6_coef','app_LT_entropy_shannon_7_coef','app_LT_entropy_shannon_8_coef','app_LT_entropy_shannon_9_coef','app_LT_entropy_shannon_10_coef','app_LT_entropy_log_1_coef','app_LT_entropy_log_2_coef','app_LT_entropy_log_3_coef','app_LT_entropy_log_4_coef','app_LT_entropy_log_5_coef','app_LT_entropy_log_6_coef','app_LT_entropy_log_7_coef','app_LT_entropy_log_8_coef','app_LT_entropy_log_9_coef','app_LT_entropy_log_10_coef','app_LT_TKEO_mean_1_coef','app_LT_TKEO_mean_2_coef','app_LT_TKEO_mean_3_coef','app_LT_TKEO_mean_4_coef','app_LT_TKEO_mean_5_coef','app_LT_TKEO_mean_6_coef','app_LT_TKEO_mean_7_coef','app_LT_TKEO_mean_8_coef','app_LT_TKEO_mean_9_coef','app_LT_TKEO_mean_10_coef','app_LT_TKEO_std_1_coef','app_LT_TKEO_std_2_coef','app_LT_TKEO_std_3_coef','app_LT_TKEO_std_4_coef','app_LT_TKEO_std_5_coef','app_LT_TKEO_std_6_coef','app_LT_TKEO_std_7_coef','app_LT_TKEO_std_8_coef','app_LT_TKEO_std_9_coef','app_LT_TKEO_std_10_coef','tqwt_energy_dec_1','tqwt_energy_dec_2','tqwt_energy_dec_3','tqwt_energy_dec_4','tqwt_energy_dec_5','tqwt_energy_dec_6','tqwt_energy_dec_7','tqwt_energy_dec_8','tqwt_energy_dec_9','tqwt_energy_dec_10','tqwt_energy_dec_11','tqwt_energy_dec_12','tqwt_energy_dec_13','tqwt_energy_dec_14','tqwt_energy_dec_15','tqwt_energy_dec_16','tqwt_energy_dec_17','tqwt_energy_dec_18','tqwt_energy_dec_19','tqwt_energy_dec_20','tqwt_energy_dec_21','tqwt_energy_dec_22','tqwt_energy_dec_23','tqwt_energy_dec_24','tqwt_energy_dec_25','tqwt_energy_dec_26','tqwt_energy_dec_27','tqwt_energy_dec_28','tqwt_energy_dec_29','tqwt_energy_dec_30','tqwt_energy_dec_31','tqwt_energy_dec_32','tqwt_energy_dec_33','tqwt_energy_dec_34','tqwt_energy_dec_35','tqwt_energy_dec_36','tqwt_entropy_shannon_dec_1','tqwt_entropy_shannon_dec_2','tqwt_entropy_shannon_dec_3','tqwt_entropy_shannon_dec_4','tqwt_entropy_shannon_dec_5','tqwt_entropy_shannon_dec_6','tqwt_entropy_shannon_dec_7','tqwt_entropy_shannon_dec_8','tqwt_entropy_shannon_dec_9','tqwt_entr
opy_shannon_dec_10','tqwt_entropy_shannon_dec_11','tqwt_entropy_shannon_dec_12','tqwt_entropy_shannon_dec_13','tqwt_entropy_shannon_dec_14','tqwt_entropy_shannon_dec_15','tqwt_entropy_shannon_dec_16','tqwt_entropy_shannon_dec_17','tqwt_entropy_shannon_dec_18','tqwt_entropy_shannon_dec_19','tqwt_entropy_shannon_dec_20','tqwt_entropy_shannon_dec_21','tqwt_entropy_shannon_dec_22','tqwt_entropy_shannon_dec_23','tqwt_entropy_shannon_dec_24','tqwt_entropy_shannon_dec_25','tqwt_entropy_shannon_dec_26','tqwt_entropy_shannon_dec_27','tqwt_entropy_shannon_dec_28','tqwt_entropy_shannon_dec_29','tqwt_entropy_shannon_dec_30','tqwt_entropy_shannon_dec_31','tqwt_entropy_shannon_dec_32','tqwt_entropy_shannon_dec_33','tqwt_entropy_shannon_dec_34','tqwt_entropy_shannon_dec_35','tqwt_entropy_shannon_dec_36','tqwt_entropy_log_dec_1','tqwt_entropy_log_dec_2','tqwt_entropy_log_dec_3','tqwt_entropy_log_dec_4','tqwt_entropy_log_dec_5','tqwt_entropy_log_dec_6','tqwt_entropy_log_dec_7','tqwt_entropy_log_dec_8','tqwt_entropy_log_dec_9','tqwt_entropy_log_dec_10','tqwt_entropy_log_dec_11','tqwt_entropy_log_dec_12','tqwt_entropy_log_dec_13','tqwt_entropy_log_dec_14','tqwt_entropy_log_dec_15','tqwt_entropy_log_dec_16','tqwt_entropy_log_dec_17','tqwt_entropy_log_dec_18','tqwt_entropy_log_dec_19','tqwt_entropy_log_dec_20','tqwt_entropy_log_dec_21','tqwt_entropy_log_dec_22','tqwt_entropy_log_dec_23','tqwt_entropy_log_dec_24','tqwt_entropy_log_dec_25','tqwt_entropy_log_dec_26','tqwt_entropy_log_dec_27','tqwt_entropy_log_dec_28','tqwt_entropy_log_dec_29','tqwt_entropy_log_dec_30','tqwt_entropy_log_dec_31','tqwt_entropy_log_dec_32','tqwt_entropy_log_dec_33','tqwt_entropy_log_dec_34','tqwt_entropy_log_dec_35','tqwt_entropy_log_dec_36','tqwt_TKEO_mean_dec_1','tqwt_TKEO_mean_dec_2','tqwt_TKEO_mean_dec_3','tqwt_TKEO_mean_dec_4','tqwt_TKEO_mean_dec_5','tqwt_TKEO_mean_dec_6','tqwt_TKEO_mean_dec_7','tqwt_TKEO_mean_dec_8','tqwt_TKEO_mean_dec_9','tqwt_TKEO_mean_dec_10','tqwt_TKEO_mean_dec_11','tqwt_TKEO_mean_de
c_12','tqwt_TKEO_mean_dec_13','tqwt_TKEO_mean_dec_14','tqwt_TKEO_mean_dec_15','tqwt_TKEO_mean_dec_16','tqwt_TKEO_mean_dec_17','tqwt_TKEO_mean_dec_18','tqwt_TKEO_mean_dec_19','tqwt_TKEO_mean_dec_20','tqwt_TKEO_mean_dec_21','tqwt_TKEO_mean_dec_22','tqwt_TKEO_mean_dec_23','tqwt_TKEO_mean_dec_24','tqwt_TKEO_mean_dec_25','tqwt_TKEO_mean_dec_26','tqwt_TKEO_mean_dec_27','tqwt_TKEO_mean_dec_28','tqwt_TKEO_mean_dec_29','tqwt_TKEO_mean_dec_30','tqwt_TKEO_mean_dec_31','tqwt_TKEO_mean_dec_32','tqwt_TKEO_mean_dec_33','tqwt_TKEO_mean_dec_34','tqwt_TKEO_mean_dec_35','tqwt_TKEO_mean_dec_36','tqwt_TKEO_std_dec_1','tqwt_TKEO_std_dec_2','tqwt_TKEO_std_dec_3','tqwt_TKEO_std_dec_4','tqwt_TKEO_std_dec_5','tqwt_TKEO_std_dec_6','tqwt_TKEO_std_dec_7','tqwt_TKEO_std_dec_8','tqwt_TKEO_std_dec_9','tqwt_TKEO_std_dec_10','tqwt_TKEO_std_dec_11','tqwt_TKEO_std_dec_12','tqwt_TKEO_std_dec_13','tqwt_TKEO_std_dec_14','tqwt_TKEO_std_dec_15','tqwt_TKEO_std_dec_16','tqwt_TKEO_std_dec_17','tqwt_TKEO_std_dec_18','tqwt_TKEO_std_dec_19','tqwt_TKEO_std_dec_20','tqwt_TKEO_std_dec_21','tqwt_TKEO_std_dec_22','tqwt_TKEO_std_dec_23','tqwt_TKEO_std_dec_24','tqwt_TKEO_std_dec_25','tqwt_TKEO_std_dec_26','tqwt_TKEO_std_dec_27','tqwt_TKEO_std_dec_28','tqwt_TKEO_std_dec_29','tqwt_TKEO_std_dec_30','tqwt_TKEO_std_dec_31','tqwt_TKEO_std_dec_32','tqwt_TKEO_std_dec_33','tqwt_TKEO_std_dec_34','tqwt_TKEO_std_dec_35','tqwt_TKEO_std_dec_36','tqwt_medianValue_dec_1','tqwt_medianValue_dec_2','tqwt_medianValue_dec_3','tqwt_medianValue_dec_4','tqwt_medianValue_dec_5','tqwt_medianValue_dec_6','tqwt_medianValue_dec_7','tqwt_medianValue_dec_8','tqwt_medianValue_dec_9','tqwt_medianValue_dec_10','tqwt_medianValue_dec_11','tqwt_medianValue_dec_12','tqwt_medianValue_dec_13','tqwt_medianValue_dec_14','tqwt_medianValue_dec_15','tqwt_medianValue_dec_16','tqwt_medianValue_dec_17','tqwt_medianValue_dec_18','tqwt_medianValue_dec_19','tqwt_medianValue_dec_20','tqwt_medianValue_dec_21','tqwt_medianValue_dec_22','tqwt_medianValue_dec_23','tqwt_medi
anValue_dec_24','tqwt_medianValue_dec_25','tqwt_medianValue_dec_26','tqwt_medianValue_dec_27','tqwt_medianValue_dec_28','tqwt_medianValue_dec_29','tqwt_medianValue_dec_30','tqwt_medianValue_dec_31','tqwt_medianValue_dec_32','tqwt_medianValue_dec_33','tqwt_medianValue_dec_34','tqwt_medianValue_dec_35','tqwt_medianValue_dec_36','tqwt_meanValue_dec_1','tqwt_meanValue_dec_2','tqwt_meanValue_dec_3','tqwt_meanValue_dec_4','tqwt_meanValue_dec_5','tqwt_meanValue_dec_6','tqwt_meanValue_dec_7','tqwt_meanValue_dec_8','tqwt_meanValue_dec_9','tqwt_meanValue_dec_10','tqwt_meanValue_dec_11','tqwt_meanValue_dec_12','tqwt_meanValue_dec_13','tqwt_meanValue_dec_14','tqwt_meanValue_dec_15','tqwt_meanValue_dec_16','tqwt_meanValue_dec_17','tqwt_meanValue_dec_18','tqwt_meanValue_dec_19','tqwt_meanValue_dec_20','tqwt_meanValue_dec_21','tqwt_meanValue_dec_22','tqwt_meanValue_dec_23','tqwt_meanValue_dec_24','tqwt_meanValue_dec_25','tqwt_meanValue_dec_26','tqwt_meanValue_dec_27','tqwt_meanValue_dec_28','tqwt_meanValue_dec_29','tqwt_meanValue_dec_30','tqwt_meanValue_dec_31','tqwt_meanValue_dec_32','tqwt_meanValue_dec_33','tqwt_meanValue_dec_34','tqwt_meanValue_dec_35','tqwt_meanValue_dec_36','tqwt_stdValue_dec_1','tqwt_stdValue_dec_2','tqwt_stdValue_dec_3','tqwt_stdValue_dec_4','tqwt_stdValue_dec_5','tqwt_stdValue_dec_6','tqwt_stdValue_dec_7','tqwt_stdValue_dec_8','tqwt_stdValue_dec_9','tqwt_stdValue_dec_10','tqwt_stdValue_dec_11','tqwt_stdValue_dec_12','tqwt_stdValue_dec_13','tqwt_stdValue_dec_14','tqwt_stdValue_dec_15','tqwt_stdValue_dec_16','tqwt_stdValue_dec_17','tqwt_stdValue_dec_18','tqwt_stdValue_dec_19','tqwt_stdValue_dec_20','tqwt_stdValue_dec_21','tqwt_stdValue_dec_22','tqwt_stdValue_dec_23','tqwt_stdValue_dec_24','tqwt_stdValue_dec_25','tqwt_stdValue_dec_26','tqwt_stdValue_dec_27','tqwt_stdValue_dec_28','tqwt_stdValue_dec_29','tqwt_stdValue_dec_30','tqwt_stdValue_dec_31','tqwt_stdValue_dec_32','tqwt_stdValue_dec_33','tqwt_stdValue_dec_34','tqwt_stdValue_dec_35','tqwt_stdValue_dec_36
','tqwt_minValue_dec_1','tqwt_minValue_dec_2','tqwt_minValue_dec_3','tqwt_minValue_dec_4','tqwt_minValue_dec_5','tqwt_minValue_dec_6','tqwt_minValue_dec_7','tqwt_minValue_dec_8','tqwt_minValue_dec_9','tqwt_minValue_dec_10','tqwt_minValue_dec_11','tqwt_minValue_dec_12','tqwt_minValue_dec_13','tqwt_minValue_dec_14','tqwt_minValue_dec_15','tqwt_minValue_dec_16','tqwt_minValue_dec_17','tqwt_minValue_dec_18','tqwt_minValue_dec_19','tqwt_minValue_dec_20','tqwt_minValue_dec_21','tqwt_minValue_dec_22','tqwt_minValue_dec_23','tqwt_minValue_dec_24','tqwt_minValue_dec_25','tqwt_minValue_dec_26','tqwt_minValue_dec_27','tqwt_minValue_dec_28','tqwt_minValue_dec_29','tqwt_minValue_dec_30','tqwt_minValue_dec_31','tqwt_minValue_dec_32','tqwt_minValue_dec_33','tqwt_minValue_dec_34','tqwt_minValue_dec_35','tqwt_minValue_dec_36','tqwt_maxValue_dec_1','tqwt_maxValue_dec_2','tqwt_maxValue_dec_3','tqwt_maxValue_dec_4','tqwt_maxValue_dec_5','tqwt_maxValue_dec_6','tqwt_maxValue_dec_7','tqwt_maxValue_dec_8','tqwt_maxValue_dec_9','tqwt_maxValue_dec_10','tqwt_maxValue_dec_11','tqwt_maxValue_dec_12','tqwt_maxValue_dec_13','tqwt_maxValue_dec_14','tqwt_maxValue_dec_15','tqwt_maxValue_dec_16','tqwt_maxValue_dec_17','tqwt_maxValue_dec_18','tqwt_maxValue_dec_19','tqwt_maxValue_dec_20','tqwt_maxValue_dec_21','tqwt_maxValue_dec_22','tqwt_maxValue_dec_23','tqwt_maxValue_dec_24','tqwt_maxValue_dec_25','tqwt_maxValue_dec_26','tqwt_maxValue_dec_27','tqwt_maxValue_dec_28','tqwt_maxValue_dec_29','tqwt_maxValue_dec_30','tqwt_maxValue_dec_31','tqwt_maxValue_dec_32','tqwt_maxValue_dec_33','tqwt_maxValue_dec_34','tqwt_maxValue_dec_35','tqwt_maxValue_dec_36','tqwt_skewnessValue_dec_1','tqwt_skewnessValue_dec_2','tqwt_skewnessValue_dec_3','tqwt_skewnessValue_dec_4','tqwt_skewnessValue_dec_5','tqwt_skewnessValue_dec_6','tqwt_skewnessValue_dec_7','tqwt_skewnessValue_dec_8','tqwt_skewnessValue_dec_9','tqwt_skewnessValue_dec_10','tqwt_skewnessValue_dec_11','tqwt_skewnessValue_dec_12','tqwt_skewnessValue_dec_13','tqwt
_skewnessValue_dec_14','tqwt_skewnessValue_dec_15','tqwt_skewnessValue_dec_16','tqwt_skewnessValue_dec_17','tqwt_skewnessValue_dec_18','tqwt_skewnessValue_dec_19','tqwt_skewnessValue_dec_20','tqwt_skewnessValue_dec_21','tqwt_skewnessValue_dec_22','tqwt_skewnessValue_dec_23','tqwt_skewnessValue_dec_24','tqwt_skewnessValue_dec_25','tqwt_skewnessValue_dec_26','tqwt_skewnessValue_dec_27','tqwt_skewnessValue_dec_28','tqwt_skewnessValue_dec_29','tqwt_skewnessValue_dec_30','tqwt_skewnessValue_dec_31','tqwt_skewnessValue_dec_32','tqwt_skewnessValue_dec_33','tqwt_skewnessValue_dec_34','tqwt_skewnessValue_dec_35','tqwt_skewnessValue_dec_36','tqwt_kurtosisValue_dec_1','tqwt_kurtosisValue_dec_2','tqwt_kurtosisValue_dec_3','tqwt_kurtosisValue_dec_4','tqwt_kurtosisValue_dec_5','tqwt_kurtosisValue_dec_6','tqwt_kurtosisValue_dec_7','tqwt_kurtosisValue_dec_8','tqwt_kurtosisValue_dec_9','tqwt_kurtosisValue_dec_10','tqwt_kurtosisValue_dec_11','tqwt_kurtosisValue_dec_12','tqwt_kurtosisValue_dec_13','tqwt_kurtosisValue_dec_14','tqwt_kurtosisValue_dec_15','tqwt_kurtosisValue_dec_16','tqwt_kurtosisValue_dec_17','tqwt_kurtosisValue_dec_18','tqwt_kurtosisValue_dec_19','tqwt_kurtosisValue_dec_20','tqwt_kurtosisValue_dec_21','tqwt_kurtosisValue_dec_22','tqwt_kurtosisValue_dec_23','tqwt_kurtosisValue_dec_24','tqwt_kurtosisValue_dec_25','tqwt_kurtosisValue_dec_26','tqwt_kurtosisValue_dec_27','tqwt_kurtosisValue_dec_28','tqwt_kurtosisValue_dec_29','tqwt_kurtosisValue_dec_30','tqwt_kurtosisValue_dec_31','tqwt_kurtosisValue_dec_32','tqwt_kurtosisValue_dec_33','tqwt_kurtosisValue_dec_34','tqwt_kurtosisValue_dec_35','tqwt_kurtosisValue_dec_36']
nums = attrs[1:]
model = Classifier(attrs=attrs, numeric=nums, label='class', pos='1')
data = model.load_data('data/parkison_disease/parkison_disease.csv')
print('\n% parkison disease dataset', len(data), len(data[0]))
return model, data
def avila():
    """Build a Classifier for the avila dataset; load both train and test."""
    attrs = [f'f{i}' for i in range(1, 11)]
    # all ten attributes are numeric
    model = Classifier(attrs=attrs, numeric=attrs, label='class', pos='A')
    data_train = model.load_data('data/avila/train.csv')
    data_test = model.load_data('data/avila/test.csv')
    print(f'\n% avila train dataset {len(data_train)} {len(data_train[0])}')
    print(f'% avila test dataset {len(data_test)} {len(data_test[0])}')
    return model, data_train, data_test
| 29,365 | 124.495726 | 17,642 | py |
PlosCB2013 | PlosCB2013-master/PlosCB2013_func.py | #This script is used to generate the set of representative positive Boolean function up to n=7
#Functions are generated level by level. Functions of a given level l have l terms/clauses in their DNF/CNF
#For n=6 everything can be run locally, however, for n=7 one needs a cluster or a very powerful machine
import numpy as np
import h5py
from itertools import permutations
def cartesian(arrays, out=None):
    """
    Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray, optional
        Pre-allocated array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays, where M is the product of the input sizes.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype
    n = np.prod([x.size for x in arrays])
    if out is None:
        out = np.zeros([n, len(arrays)], dtype=dtype)
    # Floor division: under Python 3 (or `from __future__ import division`)
    # plain `/` yields a float, which is invalid as a repeat count / slice bound.
    m = n // arrays[0].size
    out[:, 0] = np.repeat(arrays[0], m)
    if arrays[1:]:
        # Recursively fill the remaining columns for the first block of rows,
        # then tile that block for every other value of the first array.
        cartesian(arrays[1:], out=out[0:m, 1:])
        # range() replaces the Python-2-only xrange(); behaviour is identical.
        for j in range(1, arrays[0].size):
            out[j * m:(j + 1) * m, 1:] = out[0:m, 1:]
    return out
def iptgen(n=4, ex=[2]):
    """
    Generate the input subspace containing the vectors whose number of
    active inputs is listed in ex.

    Parameters
    ----------
    n : int
        Input dimensionality.
    ex : list of int
        Keep only input vectors whose number of 1s appears in this list.

    Returns
    -------
    ndarray
        One row (length n) per selected input vector, ordered from the
        smallest to the largest binary number, e.g. iptgen(4, [2]) yields
        the six vectors with exactly two active inputs.
    """
    # Full truth table: every binary vector of length n, in binary order.
    ipt = cartesian(np.repeat([[0, 1]], n, axis=0))
    active_counts = np.sum(ipt, axis=1)
    # Mark the rows whose number of active inputs matches any entry of ex.
    keep = np.zeros(ipt.shape[0])
    for target in ex:
        keep += active_counts == target
    # A repeat count of 0 drops a row; >= 1 keeps it exactly once.
    return np.repeat(ipt, keep >= 1, axis=0)
def gsubs(n):
    """
    Build the subsumption matrix for a given number of input variables n.

    Parameters
    ----------
    n : int
        Number of input variables.

    Returns
    -------
    subs : numpy bool array of shape (2**n, 2**n)
        subs[c1, c2] is True when the two literals neither subsume each
        other (c1|c2 differs from both) nor violate the canonical ordering
        c1 < c2, i.e. True means their conjunction may be concatenated.

    Examples
    --------
    >>> subsummation_mat = gsubs(2)
    """
    literals = range(2 ** n)
    subs = []
    for c1 in literals:
        row = []
        for c2 in literals:
            # c1|c2 == c1 or c1|c2 == c2 means one literal subsumes the
            # other; c1 > c2 rejects the non-canonical ordering.
            if c1 | c2 == c1 or c1 | c2 == c2 or c1 > c2:
                row.append(False)  # Do not concatenate if they subsume
            else:
                row.append(True)  # Concatenate their conjunction otherwise
        subs.append(row)
    # The np.bool alias was removed in NumPy 1.24; the builtin bool is the
    # correct, equivalent dtype.
    return np.array(subs, dtype=bool)
def gperms(n):
    """
    Generate every permutation of the input-vector set obtained by
    permuting the labels of the input lines.

    Parameters
    ----------
    n : int
        Number of input variables.

    Returns
    -------
    perms : list of lists of int
        One list per label permutation (n! in total); the first list is the
        identity permutation. Each entry is an input vector re-read as a
        binary number after its columns were permuted.

    Examples
    --------
    >>> gperms(2)
    [[0, 1, 2, 3], [0, 2, 1, 3]]

    Comments
    --------
    A vectorized form would be faster, but this is adequate up to n=7.
    """
    vectors = [list(vec) for vec in iptgen(n, range(n + 1))]
    n_columns = len(vectors[0])
    perms = []
    for order in permutations(range(n_columns)):
        # Re-read every vector as a binary number with its columns permuted
        # according to `order`.
        perms.append([
            int(''.join(str(vec[col]) for col in order), base=2)
            for vec in vectors
        ])
    return perms
def nxtl(n, fprev, subs, perms):
    """
    Generate the set of positive representative Boolean functions of level m
    from the one of level m-1. A function is a tuple of integers, each being
    a term (resp. clause) of the Disjunctive Normal Form (resp. Conjunctive NF).

    Parameters
    ----------
    n : int
        Number of input variables.
    fprev : list of tuples
        Minimal positive DNFs (CNFs) from the previous level.
    subs : Boolean numpy array
        Prebuilt subsumption matrix (see gsubs); subs[a, b] is True when
        literals a and b neither subsume each other nor violate a < b.
    perms : list of lists
        Possible permutations of the input vectors (see gperms).

    Returns
    -------
    fnext : set of tuples
        Minimal positive DNFs of the next level, one canonical
        representative per permutation class.

    Examples
    --------
    >>> nxtl(3, [(i,) for i in range(2**3)], subs=gsubs(3), perms=gperms(3))
    """
    literals = [(i,) for i in range(2**n)] # Build the list of all possible literals
    fnext = set()
    for f1 in fprev: # Iterate through all DNFs of level m-1
        for f2 in literals[max(f1):]: # consider only literals above the highest one already in f1
            i = 0
            while subs[(f1[i],) + f2] != 0: # advance while f1[i] does NOT subsume the candidate f2
                i += 1 # move to the next literal of the DNF
                if i == len(f1): # every literal checked: nothing in f1 subsumes f2
                    fc = f1+f2 # concatenate the DNF with the new literal
                    # Build all images of fc under the label permutations
                    f = [tuple(sorted([pc[i] for i in fc])) for pc in perms]
                    # Keep the smallest image as the canonical class representative
                    fnext.add(min(f))
                    break
    return fnext
def wrap(n, lmax, filename="record.hdf5"):
    """
    Wrap gsubs, gperms and nxtl to generate all positive monotone functions
    for a given number of variables, recording each level in an HDF5 file.

    Parameters
    ----------
    n : int
        Number of input variables.
    lmax : int
        Level at which the generation stops.
    filename : str
        Name of the hdf5 file the levels are appended to.

    Returns
    -------
    Nothing; datasets "/n<n>/level<i>" are written to the file and the
    number of functions per level is printed.

    Examples
    --------
    >>> wrap(3, 2)
    """
    phi = set([(2**i - 1,) for i in range(1, n + 1)])  # Building level one
    perms = gperms(n)
    subsu = gsubs(n)
    gname = "n" + str(n)
    dname = "/" + gname + "/level"
    # print() is valid under both Python 2 and 3; the bare `print x`
    # statement form is a SyntaxError on Python 3.
    print(2)  # Counting the two level0 functions
    for i in range(1, lmax + 1):
        print(len(phi))
        rec = [tuple(fcur) for fcur in phi]
        rec.sort()
        # Context manager guarantees the file is closed even if
        # create_dataset raises, unlike the previous manual close().
        with h5py.File(filename, "a") as hdf:
            if dname + str(i) not in hdf and phi:
                hdf.create_dataset(dname + str(i), data=np.array(rec), dtype=np.int8)
        phi = nxtl(n, phi, subsu, perms)
if __name__ == '__main__':
    # Generate and record the representative positive Boolean functions for
    # small input dimensionalities; n >= 6 needs far more time and memory
    # (see header note), hence the commented-out call.
    wrap(3,3)
    wrap(4,6)
    wrap(5,10)
    #wrap(6,20)
| 7,138 | 27.78629 | 113 | py |
PlosCB2013 | PlosCB2013-master/PlosCB2013_biophy.py | # Script mostly written by Romain Caze some part are updated from Andrew Davidson code snippets
# last modification:26/03/13.
from neuron import nrn, h, hclass, run, init
import matplotlib.pyplot as plt
import numpy as np
# Wrappers from Andrew Davidson neuronpy modified and commented by Romain Caze
# They enable to interface NEURON and Python more easily.
class Mechanism(object):
    """
    Create a mechanism which will be inserted in the membrane of a cell.

    Examples
    --------
    >>> leak = Mechanism('pas', {'e': -65, 'g': 0.0002})
    >>> hh = Mechanism('hh')
    """
    def __init__(self, name, parameters=None):
        """
        Parameters
        ----------
        name: a char
            the label of the mechanism
        parameters: a dictionary, optional
            contains the different parameters of a mechanism
        """
        self.name = name
        # A mutable default argument ({}) would be shared across every call;
        # use a None sentinel and build a fresh dict per instance instead.
        self.parameters = {} if parameters is None else parameters

    def insert_into(self, section):
        """
        Insert this mechanism into a section and apply its parameters to
        every segment.

        Parameters
        ----------
        section: a NEURON section
            the section where the mechanism needs to be inserted
        """
        section.insert(self.name)
        for name, value in self.parameters.items():
            for segment in section:
                mech = getattr(segment, self.name)
                setattr(mech, name, value)
class Section(nrn.Section):
    """
    Create a NEURON section with given mechanisms inserted in it.

    Examples
    --------
    >>> soma = Section(L=30, diam=30, mechanisms=[hh, leak])
    >>> apical = Section(L=600, diam=2, nseg=5, mechanisms=[leak],
    ...                  parent=soma, connection_point=0)
    """
    def __init__(self, L, diam, nseg=1, Ra=100, cm=1, mechanisms=[], parent=None, connection_point=1):
        """
        Parameters
        ----------
        L: a float
            length in micrometers
        diam: a float
            diameter in micrometers
        nseg: an int
            number of segments
        Ra: a float
            axial resistance in Ohm/micrometer square
        cm: a float
            membrane capacitance in F/micrometer square
        mechanisms: a list
            Mechanism objects to be inserted (created beforehand).
            NOTE(review): [] is a shared mutable default; it is only
            iterated here, so it is harmless, but a None sentinel would
            be safer.
        parent: a NEURON section
            section this section is attached to
        connection_point: a float between 0 and 1
            where on the parent this section is connected
        """
        nrn.Section.__init__(self)
        # set geometry
        self.L = L
        self.diam = diam
        self.nseg = nseg
        # set cable properties
        self.Ra = Ra
        self.cm = cm
        # connect to parent section
        if parent:
            self.connect(parent, connection_point, 0)
        # add the mechanisms
        for mechanism in mechanisms:
            mechanism.insert_into(self)

    def record_spikes(self, threshold=-30):
        """
        Record the spikes produced in this section, i.e. the number of
        times the voltage crosses `threshold` in the middle of the
        section (segment 0.5).

        Parameters
        ----------
        threshold: a float
            voltage determining the presence or not of a spike

        Returns
        -------
        nothing, but sets self.spiketimes and self.spikecount
        """
        self.spiketimes = h.Vector()
        self.spikecount = h.APCount(0.5, sec=self)
        self.spikecount.thresh = threshold
        self.spikecount.record(self.spiketimes)
#My own class inspired by the pydesign tutorial.
#http://www.paedia.info/quickstart/pydesign.html
class BipolarNeuron(object):
    """
    Produce neuron objects with a standard soma and two identical passive
    dendrites (Xdend, Ydend) connected on opposite sides of the soma.
    Dendrite length and diameter are set at construction.
    """
    def __init__(self, d_length=50, d_diam=0.4):
        """
        Parameters
        ----------
        d_length: an integer
            length of each dendrite in micrometers
        d_diam: a float
            diameter of each dendrite in micrometers
        """
        # Creating Mechanisms: active (hh) soma, passive leak everywhere
        hh = Mechanism('hh')
        pas = Mechanism('pas', {'e':-65,'g':0.0001})
        # Creating the Sections
        self.soma = Section(10, 10, Ra=150, mechanisms=[hh,pas])
        self.Xdend = Section(d_length, d_diam, nseg=10, Ra=150, parent=self.soma, mechanisms=[pas])
        self.Ydend = Section(d_length, d_diam, nseg=10, Ra=150, parent=self.soma, mechanisms=[pas])

    def initialise(self, vrest=-65):
        """
        Initialise the model; call before each simulation.

        Parameters
        ----------
        vrest: a float
            initial membrane potential in mV
        """
        for sec in h.allsec():
            h.finitialize(vrest, sec)
            h.fcurrent(sec)
            h.frecord_init()

    def min_sim(self, TSTOP=100):
        """
        Launch a minimal simulation to test the model and determine its
        resting potential empirically.

        Parameters
        ----------
        TSTOP: a float
            duration of the test run in ms

        Returns
        -------
        vrest: a float
            the somatic voltage at the end of the run
        """
        vrec = h.Vector()
        vrec.record(self.soma(0.5)._ref_v)
        for sec in h.allsec():
            h.finitialize(-65, sec)
            h.fcurrent(sec)
            h.frecord_init()
        while h.t < TSTOP: # advance the simulation until TSTOP
            h.fadvance()
        vrest = np.array(vrec)[-1]
        return vrest
class Simulation(object):
    """
    Create and control a simulation of a neuron model.

    Example
    -------
    >>> cell = BipolarNeuron()
    >>> sim = Simulation(cell)
    >>> sim.go()
    >>> sim.show()
    """
    def __init__(self,
                 cell,
                 sim_time=100,
                 dt=0.01):
        """
        Parameters
        ----------
        cell: BipolarNeuron object
            Neuron model to be stimulated
        sim_time: integer
            Time of the simulation in ms
        dt: a float
            the integration timestep in ms (fixed step)
        """
        self.cell = cell
        self.sim_time = sim_time
        self.dt = dt
        # Point processes and their drivers, keyed by stimulation name.
        self.syn, self.stim, self.vplay, self.netcon = {}, {}, {}, {}

    def add_ExpSyn(self, section='soma', position=0.5, name='default', tstim=[50], w=.001):
        """
        Create/replace an ExpSyn synapse on a given section, active at the
        times listed in tstim.

        Comments
        --------
        tstim is sorted to make sure the stimulation times are in the right
        order. This method requires vecstim.mod to have been pre-compiled
        by NEURON.
        """
        self.syn[name] = h.ExpSyn(self.cell.__getattribute__(section)(position))
        self.stim[name] = h.Vector(np.sort(tstim)) # Converting tstim into a NEURON vector (to play in NEURON)
        self.vplay[name] = h.VecStim() # Creating play vectors to interface with NEURON
        self.vplay[name].play(self.stim[name]) # Connecting vector to VecStim object to play them
        self.netcon[name] = h.NetCon(self.vplay[name], self.syn[name]) # Building the netcon object to connect the stims and the synapses
        self.netcon[name].weight[0] = w # Setting the individual weights

    def set_IClamp(self, name='IClamp', delay=1, amp=-1, dur=3):
        """
        Create a current clamp point process on the soma.

        Parameters
        ----------
        name: a char
            key under which the point process is stored in self.stim
        delay: a float
            onset time in ms
        amp: a float
            clamp amplitude in nA
        dur: a float
            clamp duration in ms
        """
        stim = h.IClamp(self.cell.soma(0.5))
        stim.delay = delay
        stim.amp = amp
        stim.dur = dur
        self.stim[name] = stim

    def show(self):
        """
        Plot the somatic voltage trace recorded by the last simulation.
        """
        x = np.array(self.rec_t)
        y = np.array(self.rec_v)
        plt.plot(x, y)
        plt.xlabel("Time [ms]")
        plt.ylabel("Voltage [mV]")
        #plt.axis(ymin=-120, ymax=-50)
        plt.show()

    def set_recording(self):
        """
        Attach recording vectors for time and somatic voltage.
        """
        # Record Time
        self.rec_t = h.Vector()
        self.rec_t.record(h._ref_t)
        # Record Voltage
        self.rec_v = h.Vector()
        self.rec_v.record(self.cell.soma(0.5)._ref_v)

    def get_recording(self):
        """
        Return the recorded somatic voltage and its time axis as numpy arrays.
        """
        time = np.array(self.rec_t)
        voltage = np.array(self.rec_v)
        return time, voltage

    def go(self, sim_time=None):
        """
        Launch a simulation of a given duration.

        Parameters
        ----------
        sim_time: an integer, optional
            the time in milliseconds of the simulation;
            replaces self.sim_time if given

        Comments
        --------
        It seems that when multiple go() calls are made the output vector
        does not change.
        """
        h.t = 0
        # Start recording before the run so the trace covers the whole simulation
        self.set_recording()
        h.dt = self.dt
        self.cell.initialise()
        init()
        if sim_time:
            run(sim_time)
        else:
            run(self.sim_time)

    def insert_signal(self,
                      synlocs=[['Xdend',1], ['Xdend',1], ['Ydend',1]],
                      el=0.005,
                      weights=[1,1,1],
                      tstims=[[25,50],[25],[50]]
                      ):
        """
        Decorate this simulation with a set of synaptic stimulations,
        reproducing an uncaging protocol.

        NOTE(review): the list defaults are shared mutable defaults; they
        are only read here, so this is harmless, but None sentinels would
        be safer.

        Parameters
        ----------
        synlocs: a list of [section-name, position] pairs
            sections and positions of the stimulation sites
        el: a float
            elementary weight value (maximum synaptic conductance, microS)
        weights: a list
            multiplicative factors applied to el, one per site
        tstims: a list of lists
            stimulation times, one list per site

        Returns
        -------
        nothing; the simulation object now carries one ExpSyn per site
        """
        for i, tstim in enumerate(tstims):
            self.add_ExpSyn(section=synlocs[i][0], position=synlocs[i][1], name='Stream'+str(i), tstim=tstim, w=weights[i]*el)
if __name__ == '__main__':
    # Smoke test: build a bipolar neuron, attach the default synaptic
    # stimulation, run the simulation and plot the somatic voltage.
    mysim = Simulation(BipolarNeuron())
    mysim.insert_signal()
    mysim.go()
    mysim.show()
| 10,507 | 30.842424 | 137 | py |
TensorFlowTTS | TensorFlowTTS-master/setup.py | """Setup Tensorflow TTS library."""
import os
import sys
from distutils.version import LooseVersion
import pip
from setuptools import find_packages, setup
# Fail fast on unsupported interpreter versions.
# NOTE(review): sys.version includes build metadata (e.g. "3.8.10 (default, ...)");
# comparing sys.version_info tuples would be more robust than LooseVersion here,
# and distutils.version is deprecated — confirm before changing.
if LooseVersion(sys.version) < LooseVersion("3.6"):
    raise RuntimeError(
        "TensorFlow TTS requires python >= 3.6, "
        "but your Python version is {}".format(sys.version)
    )
# Recent pip is required for the PEP 517/518-style install of the pinned deps.
if LooseVersion(pip.__version__) < LooseVersion("19"):
    raise RuntimeError(
        "pip>=19.0.0 is required, but your pip version is {}. "
        'Try again after "pip install -U pip"'.format(pip.__version__)
    )
# TODO(@dathudeptrai) update requirement if needed.
# Runtime, setup-time and test-time dependency groups.
requirements = {
    "install": [
        "tensorflow-gpu==2.7.0",
        "tensorflow-addons>=0.10.0",
        "setuptools>=38.5.1",
        "huggingface_hub==0.0.8",
        "librosa>=0.7.0",
        "soundfile>=0.10.2",
        "matplotlib>=3.1.0",
        "PyYAML>=3.12",
        "tqdm>=4.26.1",
        "h5py>=2.10.0",
        "unidecode>=1.1.1",
        "inflect>=4.1.0",
        "scikit-learn>=0.22.0",
        "pyworld>=0.2.10",
        "numba>=0.48",  # Fix No module named "numba.decorators"
        "jamo>=0.4.1",
        "pypinyin",
        "g2pM",
        "textgrid",
        "click",
        "g2p_en",
        "dataclasses",
        "pyopenjtalk",
    ],
    "setup": ["numpy", "pytest-runner",],
    "test": [
        "pytest>=3.3.0",
        "hacking>=1.1.0",
    ],
}
# TODO(@dathudeptrai) update console_scripts.
# Command-line entry points exposed by the installed package.
entry_points = {
    "console_scripts": [
        "tensorflow-tts-preprocess=tensorflow_tts.bin.preprocess:preprocess",
        "tensorflow-tts-compute-statistics=tensorflow_tts.bin.preprocess:compute_statistics",
        "tensorflow-tts-normalize=tensorflow_tts.bin.preprocess:normalize",
    ]
}
install_requires = requirements["install"]
setup_requires = requirements["setup"]
tests_require = requirements["test"]
# Every non-install/setup group (currently only "test") becomes an extra.
extras_require = {
    k: v for k, v in requirements.items() if k not in ["install", "setup"]
}
dirname = os.path.dirname(__file__)
setup(
    name="TensorFlowTTS",
    version="0.0",
    url="https://github.com/tensorspeech/TensorFlowTTS",
    author="Minh Nguyen Quan Anh, Alejandro Miguel Velasquez, Dawid Kobus, Eren Gölge, Kuan Chen, Takuya Ebata, Trinh Le Quang, Yunchao He",
    author_email="nguyenquananhminh@gmail.com",
    description="TensorFlowTTS: Real-Time State-of-the-art Speech Synthesis for TensorFlow 2",
    long_description=open(os.path.join(dirname, "README.md"), encoding="utf-8").read(),
    long_description_content_type="text/markdown",
    license="Apache-2.0",
    packages=find_packages(include=["tensorflow_tts*"]),
    install_requires=install_requires,
    setup_requires=setup_requires,
    tests_require=tests_require,
    extras_require=extras_require,
    entry_points=entry_points,
    classifiers=[
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Intended Audience :: Science/Research",
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache Software License",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| 3,199 | 31.323232 | 140 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/decode_tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode Tacotron-2."""
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
import matplotlib.pyplot as plt
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
def main():
    """Run Tacotron-2 mel-spectrogram decoding over a dumped dataset.

    Parses command-line arguments, builds the charactor/mel dataset, restores
    a trained Tacotron-2 checkpoint, and writes the predicted post-net
    mel-spectrograms (trimmed at the predicted stop token) to --outdir as
    .npy files.
    """
    parser = argparse.ArgumentParser(
        description="Decode mel-spectrogram from folder ids with trained Tacotron-2 "
        "(See detail in tensorflow_tts/example/tacotron2/decode_tacotron2.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument("--batch-size", default=8, type=int, help="batch size.")
    parser.add_argument("--win-front", default=3, type=int, help="win-front.")
    # BUG FIX: help text previously said "win-front." for --win-back.
    parser.add_argument("--win-back", default=3, type=int, help="win-back.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config and overlay the command-line arguments on top of it
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] == "npy":
        char_query = "*-ids.npy"
        # BUG FIX: args.use_norm is an int (0/1), so the previous
        # `args.use_norm is False` comparison was always False and the
        # normalized features were selected unconditionally; `--use-norm 0`
        # now really selects the raw features.
        mel_query = "*-raw-feats.npy" if args.use_norm == 0 else "*-norm-feats.npy"
        char_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define data-loader
    dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.rootdir,
        charactor_query=char_query,
        mel_query=mel_query,
        charactor_load_fn=char_load_fn,
        mel_load_fn=mel_load_fn,
        reduction_factor=config["tacotron2_params"]["reduction_factor"]
    )
    dataset = dataset.create(allow_cache=True, batch_size=args.batch_size)

    # define model and load checkpoint
    tacotron2 = TFTacotron2(
        config=Tacotron2Config(**config["tacotron2_params"]),
        name="tacotron2",
    )
    tacotron2._build()  # build model to be able load_weights.
    tacotron2.load_weights(args.checkpoint)

    # setup attention window used during inference
    tacotron2.setup_window(win_front=args.win_front, win_back=args.win_back)

    for data in tqdm(dataset, desc="[Decoding]"):
        utt_ids = data["utt_ids"]
        utt_ids = utt_ids.numpy()

        # tacotron2 inference.
        (
            mel_outputs,
            post_mel_outputs,
            stop_outputs,
            alignment_historys,
        ) = tacotron2.inference(
            input_ids=data["input_ids"],
            input_lengths=data["input_lengths"],
            speaker_ids=data["speaker_ids"],
        )

        # convert to numpy
        post_mel_outputs = post_mel_outputs.numpy()

        for i, post_mel_output in enumerate(post_mel_outputs):
            # Trim each mel at the first predicted stop token.
            stop_token = tf.math.round(tf.nn.sigmoid(stop_outputs[i]))  # [T]
            real_length = tf.math.reduce_sum(
                tf.cast(tf.math.equal(stop_token, 0.0), tf.int32), -1
            )
            post_mel_output = post_mel_output[:real_length, :]

            saved_name = utt_ids[i].decode("utf-8")

            # NOTE(review): the output always uses the "-norm-feats" suffix
            # even with --use-norm 0; downstream tooling may rely on this
            # name, so it is kept as-is.
            np.save(
                os.path.join(args.outdir, f"{saved_name}-norm-feats.npy"),
                post_mel_output.astype(np.float32),
                allow_pickle=False,
            )
if __name__ == "__main__":
    # Script entry point: decode mels from the command line.
    main()
| 5,594 | 31.52907 | 87 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/tacotron_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron Related Dataset modules."""
import itertools
import logging
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
class CharactorMelDataset(AbstractDataset):
    """Tensorflow Charactor Mel dataset.

    Pairs charactor-id sequences with mel-spectrograms dumped as .npy files
    and yields padded tf.data batches, with either forced-alignment (FAL)
    attention targets loaded from disk or guided-attention targets computed
    on the fly.
    """

    def __init__(
        self,
        dataset,
        root_dir,
        charactor_query="*-ids.npy",
        mel_query="*-norm-feats.npy",
        align_query="",
        charactor_load_fn=np.load,
        mel_load_fn=np.load,
        mel_length_threshold=0,
        reduction_factor=1,
        mel_pad_value=0.0,
        char_pad_value=0,
        ga_pad_value=-1.0,
        g=0.2,
        use_fixed_shapes=False,
    ):
        """Initialize dataset.

        Args:
            dataset (str): Dataset name (kept for config compatibility; not used here).
            root_dir (str): Root directory including dumped files.
            charactor_query (str): Query to find charactor files in root_dir.
            mel_query (str): Query to find feature files in root_dir.
            align_query (str): Query to find FAL files in root_dir. If empty, we use stock guided attention loss.
            charactor_load_fn (func): Function to load charactor file.
            mel_load_fn (func): Function to load feature file.
            mel_length_threshold (int): Threshold to remove short feature files.
            reduction_factor (int): Reduction factor on Tacotron-2 paper.
            mel_pad_value (float): Padding value for mel-spectrogram.
            char_pad_value (int): Padding value for charactor.
            ga_pad_value (float): Padding value for guided attention.
            g (float): G value for guided attention.
            use_fixed_shapes (bool): Use fixed shape for mel targets or not.
        """
        # find all of charactor and mel files.
        charactor_files = sorted(find_files(root_dir, charactor_query))
        mel_files = sorted(find_files(root_dir, mel_query))
        mel_lengths = [mel_load_fn(f).shape[0] for f in mel_files]
        char_lengths = [charactor_load_fn(f).shape[0] for f in charactor_files]

        # assert the number of files
        # (the stray "$" of the old "${root_dir}" f-string was removed)
        assert len(mel_files) != 0, f"Not found any mels files in {root_dir}."
        assert (
            len(mel_files) == len(charactor_files) == len(mel_lengths)
        ), f"Number of charactor, mel and duration files are different \
            ({len(mel_files)} vs {len(charactor_files)} vs {len(mel_lengths)})."

        self.align_files = []
        if len(align_query) > 1:
            align_files = sorted(find_files(root_dir, align_query))
            assert len(align_files) == len(
                mel_files
            ), f"Number of align files ({len(align_files)}) and mel files ({len(mel_files)}) are different"
            logging.info("Using FAL loss")
            self.align_files = align_files
        else:
            logging.info("Using guided attention loss")

        if ".npy" in charactor_query:
            suffix = charactor_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in charactor_files]
        else:
            # Previously this silently fell through and crashed later with a
            # NameError on utt_ids; fail loudly with a clear message instead.
            raise ValueError("Only .npy charactor files are supported.")

        # set global params
        self.utt_ids = utt_ids
        self.mel_files = mel_files
        self.charactor_files = charactor_files
        self.mel_load_fn = mel_load_fn
        self.charactor_load_fn = charactor_load_fn
        self.mel_lengths = mel_lengths
        self.char_lengths = char_lengths
        self.reduction_factor = reduction_factor
        self.mel_length_threshold = mel_length_threshold
        self.mel_pad_value = mel_pad_value
        self.char_pad_value = char_pad_value
        self.ga_pad_value = ga_pad_value
        self.g = g
        self.use_fixed_shapes = use_fixed_shapes
        self.max_char_length = np.max(char_lengths)

        # Round the maximum mel length up to a multiple of the reduction factor.
        if np.max(mel_lengths) % self.reduction_factor == 0:
            self.max_mel_length = np.max(mel_lengths)
        else:
            self.max_mel_length = (
                np.max(mel_lengths)
                + self.reduction_factor
                - np.max(mel_lengths) % self.reduction_factor
            )

    def get_args(self):
        """Return the generator arguments (the list of utterance ids)."""
        return [self.utt_ids]

    def generator(self, utt_ids):
        """Yield one dict of file paths per utterance."""
        for i, utt_id in enumerate(utt_ids):
            mel_file = self.mel_files[i]
            charactor_file = self.charactor_files[i]
            # BUG FIX: was `> 1`, which dropped the alignment file when the
            # dataset contained exactly one example; any non-empty list means
            # FAL alignments are available (matching the `< 1` test in create).
            align_file = self.align_files[i] if len(self.align_files) >= 1 else ""

            items = {
                "utt_ids": utt_id,
                "mel_files": mel_file,
                "charactor_files": charactor_file,
                "align_files": align_file,
            }

            yield items

    @tf.function
    def _load_data(self, items):
        """Load one example's arrays and pad the mel to the reduction factor."""
        mel = tf.numpy_function(np.load, [items["mel_files"]], tf.float32)
        charactor = tf.numpy_function(np.load, [items["charactor_files"]], tf.int32)
        # BUG FIX: was `> 1` (see generator); load the FAL matrix whenever
        # alignment files exist at all.
        g_att = (
            tf.numpy_function(np.load, [items["align_files"]], tf.float32)
            if len(self.align_files) >= 1
            else None
        )

        mel_length = len(mel)
        char_length = len(charactor)

        # padding mel to make its length a multiple of the reduction factor.
        real_mel_length = mel_length
        remainder = mel_length % self.reduction_factor
        if remainder != 0:
            new_mel_length = mel_length + self.reduction_factor - remainder
            mel = tf.pad(
                mel,
                [[0, new_mel_length - mel_length], [0, 0]],
                constant_values=self.mel_pad_value,
            )
            mel_length = new_mel_length

        items = {
            "utt_ids": items["utt_ids"],
            "input_ids": charactor,
            "input_lengths": char_length,
            "speaker_ids": 0,
            "mel_gts": mel,
            "mel_lengths": mel_length,
            "real_mel_lengths": real_mel_length,
            "g_attentions": g_att,
        }

        return items

    def _guided_attention(self, items):
        """Guided attention. Refer to page 3 on the paper (https://arxiv.org/abs/1710.08969)."""
        items = items.copy()
        mel_len = items["mel_lengths"] // self.reduction_factor
        char_len = items["input_lengths"]
        xv, yv = tf.meshgrid(tf.range(char_len), tf.range(mel_len), indexing="ij")
        f32_matrix = tf.cast(yv / mel_len - xv / char_len, tf.float32)
        # Penalty grows away from the diagonal; g controls the band width.
        items["g_attentions"] = 1.0 - tf.math.exp(
            -(f32_matrix ** 2) / (2 * self.g ** 2)
        )
        return items

    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
        drop_remainder=True,
    ):
        """Create the tf.data.Dataset pipeline.

        Note: map_fn is accepted for API compatibility but is not applied.
        """
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )

        # load data
        datasets = datasets.map(
            lambda items: self._load_data(items), tf.data.experimental.AUTOTUNE
        )

        # calculate guided attention only when no FAL files were found
        if len(self.align_files) < 1:
            datasets = datasets.map(
                lambda items: self._guided_attention(items),
                tf.data.experimental.AUTOTUNE,
            )

        datasets = datasets.filter(
            lambda x: x["mel_lengths"] > self.mel_length_threshold
        )

        if allow_cache:
            datasets = datasets.cache()

        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )

        # define padding value.
        padding_values = {
            "utt_ids": " ",
            "input_ids": self.char_pad_value,
            "input_lengths": 0,
            "speaker_ids": 0,
            "mel_gts": self.mel_pad_value,
            "mel_lengths": 0,
            "real_mel_lengths": 0,
            "g_attentions": self.ga_pad_value,
        }

        # define padded shapes.
        padded_shapes = {
            "utt_ids": [],
            "input_ids": [None]
            if self.use_fixed_shapes is False
            else [self.max_char_length],
            "input_lengths": [],
            "speaker_ids": [],
            "mel_gts": [None, 80]
            if self.use_fixed_shapes is False
            else [self.max_mel_length, 80],
            "mel_lengths": [],
            "real_mel_lengths": [],
            "g_attentions": [None, None]
            if self.use_fixed_shapes is False
            else [self.max_char_length, self.max_mel_length // self.reduction_factor],
        }

        datasets = datasets.padded_batch(
            batch_size,
            padded_shapes=padded_shapes,
            padding_values=padding_values,
            drop_remainder=drop_remainder,
        )
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)

        return datasets

    def get_output_dtypes(self):
        """Return the dtypes of the generator's output dict."""
        output_types = {
            "utt_ids": tf.string,
            "mel_files": tf.string,
            "charactor_files": tf.string,
            "align_files": tf.string,
        }
        return output_types

    def get_len_dataset(self):
        """Return the number of utterances in the dataset."""
        return len(self.utt_ids)

    def __name__(self):
        return "CharactorMelDataset"
| 10,099 | 34.438596 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/export_align.py | import os
import shutil
from tqdm import tqdm
import argparse
from scipy.ndimage import zoom
from skimage.data import camera
import numpy as np
from scipy.spatial.distance import cdist
def safemkdir(dirn):
    """Create directory *dirn* (including missing parents) if it does not exist."""
    # makedirs(exist_ok=True) avoids the race between isdir() and mkdir()
    # and, unlike os.mkdir, also creates missing parent directories.
    os.makedirs(dirn, exist_ok=True)
from pathlib import Path
def duration_to_alignment(in_duration):
    """Expand per-character durations into a hard (0/1) alignment matrix.

    Row i of the result is 1.0 over the contiguous run of frames covered by
    character i, giving a (num_chars, total_frames) float32 matrix.
    """
    total_frames = np.sum(in_duration)
    attention = np.zeros(shape=(len(in_duration), total_frames), dtype=np.float32)
    offset = 0
    for row, dur in enumerate(in_duration):
        # Mark the frames owned by this character in one slice assignment.
        attention[row, offset:offset + dur] = 1.0
        offset += dur
    return attention
def rescale_alignment(in_alignment, in_targcharlen):
    """Zoom the alignment along the character axis to in_targcharlen rows,
    binarize it at 0.5, and collect the coordinates of the active cells.

    Returns the binarized matrix and the list of (row, col) pivot points.
    """
    char_ratio = in_targcharlen / in_alignment.shape[0]
    zoomed = zoom(in_alignment, (char_ratio, 1.0), mode="nearest")
    pivot_points = []
    n_rows, n_cols = zoomed.shape
    for r in range(n_rows):
        for c in range(n_cols):
            # Threshold each cell; active cells become pivot points.
            if zoomed[r][c] < 0.5:
                zoomed[r][c] = 0.0
            else:
                zoomed[r][c] = 1.0
                pivot_points.append((r, c))
    if zoomed.shape[0] != in_targcharlen:
        print("Zooming didn't rshape well, explicitly reshaping")
        zoomed.resize((in_targcharlen, in_alignment.shape[1]))
    return zoomed, pivot_points
def gather_dist(in_mtr, in_points):
    """Return the euclidean distance from every cell coordinate of in_mtr
    (row-major order) to every point in in_points.

    A single cdist call over all coordinates is dramatically faster than
    computing distances point by point inside a loop.
    """
    n_rows, n_cols = in_mtr.shape
    # Enumerate all (row, col) coordinates in row-major order.
    full_coords = [(r, c) for r in range(n_rows) for c in range(n_cols)]
    return cdist(full_coords, in_points, "euclidean")
def create_guided(in_align, in_pvt, looseness):
    """Turn pivot points into a soft guided-attention penalty map.

    Every cell holds the squared distance to its nearest pivot point,
    scaled by a looseness factor and clipped to [0, 1]: cells on the
    alignment path get 0 and the penalty grows away from it.
    """
    n_rows, n_cols = in_align.shape
    penalty = np.ones(in_align.shape, dtype=np.float32)
    # Gather all point distances in one cdist call up front; this is
    # dramatically faster than computing them per cell inside the loop.
    dist_arr = gather_dist(in_align, in_pvt)
    # Scale looseness with the attention size (addition works better than
    # multiplication) and divide by 100 so users can type e.g. 3.35.
    real_loose = (looseness / 100) * (n_rows + n_cols)
    flat_idx = 0
    for r in range(n_rows):
        for c in range(n_cols):
            nearest = dist_arr[flat_idx].min()
            penalty[r, c] = np.power(nearest / real_loose, 2)
            flat_idx += 1
    return np.clip(penalty, 0.0, 1.0)
def get_pivot_points(in_att):
    """Collect (row, col) coordinates of every cell with weight above 0.8.

    Args:
        in_att: 2-D attention/alignment matrix.
    Returns:
        List of (row, col) tuples in row-major order.
    """
    rows, cols = np.where(in_att > 0.8)
    return [(int(r), int(c)) for r, c in zip(rows, cols)]
def main():
    """CLI entry point: convert saved per-character duration files into
    binary alignment matrices, then soften them into guided-attention maps
    saved under <dump-dir>/<set>/alignments.
    """
    parser = argparse.ArgumentParser(
        description="Postprocess durations to become alignments"
    )
    parser.add_argument(
        "--dump-dir",
        default="dump",
        type=str,
        help="Path of dump directory",
    )
    parser.add_argument(
        "--looseness",
        default=3.5,
        type=float,
        help="Looseness of the generated guided attention map. Lower values = tighter",
    )
    args = parser.parse_args()
    dump_dir = args.dump_dir
    dump_sets = ["train", "valid"]
    for d_set in dump_sets:
        full_fol = os.path.join(dump_dir, d_set)
        align_path = os.path.join(full_fol, "alignments")
        ids_path = os.path.join(full_fol, "ids")
        durations_path = os.path.join(full_fol, "durations")
        safemkdir(align_path)
        for duration_fn in tqdm(os.listdir(durations_path)):
            if not ".npy" in duration_fn:
                continue
            # Companion files share a basename and differ only in suffix:
            # "X-durations.npy" -> "X-ids.npy" -> "X-alignment.npy".
            id_fn = duration_fn.replace("-durations", "-ids")
            id_path = os.path.join(ids_path, id_fn)
            duration_path = os.path.join(durations_path, duration_fn)
            duration_arr = np.load(duration_path)
            id_arr = np.load(id_path)
            id_true_size = len(id_arr)
            align = duration_to_alignment(duration_arr)
            # Durations may disagree with the id sequence length; rescale the
            # alignment to match (rescaling also yields the pivot points).
            if align.shape[0] != id_true_size:
                align, points = rescale_alignment(align, id_true_size)
            else:
                points = get_pivot_points(align)
            if len(points) == 0:
                # NOTE(review): create_guided appears to raise on an empty
                # pivot list (min over a zero-width distance matrix), so this
                # warning is likely followed by a crash — confirm intended.
                print("WARNING points are empty for", id_fn)
            align = create_guided(align, points, args.looseness)
            align_fn = id_fn.replace("-ids", "-alignment")
            align_full_fn = os.path.join(align_path, align_fn)
            np.save(align_full_fn, align.astype("float32"))
# Script entry point.
if __name__ == "__main__":
    main()
| 4,850 | 27.704142 | 101 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/train_tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Tacotron2."""
import tensorflow as tf
# Enable on-demand GPU memory allocation on every visible GPU so TensorFlow
# does not reserve all device memory at import time.
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
    tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs.tacotron2 import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class Tacotron2Trainer(Seq2SeqBasedTrainer):
    """Tacotron2 Trainer class based on Seq2SeqBasedTrainer.
    Adds the Tacotron-2 specific losses (stop token, mel before/after the
    postnet, guided attention) on top of the generic seq2seq train loop.
    """
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the train/eval steps run under.
            is_mixed_precision (bool): Use mixed precision or not.
        """
        super(Tacotron2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "stop_token_loss",
            "mel_loss_before",
            "mel_loss_after",
            "guided_attention_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
        self.config = config
    def compile(self, model, optimizer):
        """Attach model/optimizer and build the per-example loss objects."""
        super().compile(model, optimizer)
        # Reduction.NONE keeps losses per-example so the distribution
        # strategy can do the cross-replica reduction itself.
        self.binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
    def _train_step(self, batch):
        """Here we re-define _train_step because apply input_signature make
        the training progress slower on my experiment. Note that input_signature
        is apply on based_trainer by default.
        """
        if self._already_apply_input_signature is False:
            # Lazily wrap the eager step functions in tf.function on first use;
            # experimental_relax_shapes reduces retracing on varying shapes.
            self.one_step_forward = tf.function(
                self._one_step_forward, experimental_relax_shapes=True
            )
            self.one_step_evaluate = tf.function(
                self._one_step_evaluate, experimental_relax_shapes=True
            )
            self.one_step_predict = tf.function(
                self._one_step_predict, experimental_relax_shapes=True
            )
            self._already_apply_input_signature = True
        # run one_step_forward
        self.one_step_forward(batch)
        # update counts
        self.steps += 1
        self.tqdm.update(1)
        self._check_train_finish()
    def _one_step_evaluate_per_replica(self, batch):
        """One step evaluate per GPU
        Tacotron-2 used teacher-forcing when training and evaluation.
        So we need pass `training=True` for inference step.
        """
        outputs = self._model(**batch, training=True)
        _, dict_metrics_losses = self.compute_per_example_losses(batch, outputs)
        self.update_eval_metrics(dict_metrics_losses)
    def _one_step_predict_per_replica(self, batch):
        """One step predict per GPU
        Tacotron-2 used teacher-forcing when training and evaluation.
        So we need pass `training=True` for inference step.
        """
        outputs = self._model(**batch, training=True)
        return outputs
    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        (
            decoder_output,
            post_mel_outputs,
            stop_token_predictions,
            alignment_historys,
        ) = outputs
        # L1 loss between ground-truth mels and the pre-postnet decoder output...
        mel_loss_before = calculate_3d_loss(
            batch["mel_gts"], decoder_output, loss_fn=self.mae
        )
        # ...and the postnet-refined output.
        mel_loss_after = calculate_3d_loss(
            batch["mel_gts"], post_mel_outputs, loss_fn=self.mae
        )
        # calculate stop_loss
        max_mel_length = (
            tf.reduce_max(batch["mel_lengths"])
            if self.config["use_fixed_shapes"] is False
            else [self.config["max_mel_length"]]
        )
        # Stop targets: 0 for real frames, 1 from each utterance's
        # mel_length onward (the padded tail).
        stop_gts = tf.expand_dims(
            tf.range(tf.reduce_max(max_mel_length), dtype=tf.int32), 0
        ) # [1, max_len]
        stop_gts = tf.tile(
            stop_gts, [tf.shape(batch["mel_lengths"])[0], 1]
        ) # [B, max_len]
        stop_gts = tf.cast(
            tf.math.greater_equal(stop_gts, tf.expand_dims(batch["mel_lengths"], 1)),
            tf.float32,
        )
        stop_token_loss = calculate_2d_loss(
            stop_gts, stop_token_predictions, loss_fn=self.binary_crossentropy
        )
        # calculate guided attention loss; positions equal to -1.0 in
        # g_attentions are treated as padding and masked out of the mean.
        attention_masks = tf.cast(
            tf.math.not_equal(batch["g_attentions"], -1.0), tf.float32
        )
        loss_att = tf.reduce_sum(
            tf.abs(alignment_historys * batch["g_attentions"]) * attention_masks,
            axis=[1, 2],
        )
        loss_att /= tf.reduce_sum(attention_masks, axis=[1, 2])
        per_example_losses = (
            stop_token_loss + mel_loss_before + mel_loss_after + loss_att
        )
        dict_metrics_losses = {
            "stop_token_loss": stop_token_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
            "guided_attention_loss": loss_att,
        }
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result.
        Renders target/predicted mel spectrograms and the attention alignment
        for one batch as PNGs under <outdir>/predictions/<steps>steps.
        """
        import matplotlib.pyplot as plt
        # predict with tf.function for faster.
        outputs = self.one_step_predict(batch)
        (
            decoder_output,
            mel_outputs,
            stop_token_predictions,
            alignment_historys,
        ) = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        try:
            # Multi-replica run: outputs expose per-replica tensors via .values.
            mels_before = decoder_output.values[0].numpy()
            mels_after = mel_outputs.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            alignment_historys = alignment_historys.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            # Single-device run: plain tensors without .values.
            mels_before = decoder_output.numpy()
            mels_after = mel_outputs.numpy()
            mel_gts = mel_gts.numpy()
            alignment_historys = alignment_historys.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (mel_gt, mel_before, mel_after, alignment_history) in enumerate(
            zip(mel_gts, mels_before, mels_after, alignment_historys), 0
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy() # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy() # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy() # [length, 80]
            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title(f"Predicted Mel-before-Spectrogram @ {self.steps} steps")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title(f"Predicted Mel-after-Spectrogram @ {self.steps} steps")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            # plot alignment
            figname = os.path.join(dirname, f"{idx}_alignment.png")
            fig = plt.figure(figsize=(8, 6))
            ax = fig.add_subplot(111)
            ax.set_title(f"Alignment @ {self.steps} steps")
            im = ax.imshow(
                alignment_history, aspect="auto", origin="lower", interpolation="none"
            )
            fig.colorbar(im, ax=ax)
            xlabel = "Decoder timestep"
            plt.xlabel(xlabel)
            plt.ylabel("Encoder timestep")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run training process.

    Parses CLI arguments, builds train/valid datasets, constructs the
    Tacotron-2 model and AdamW optimizer under the distribution strategy,
    then runs Tacotron2Trainer.fit. A checkpoint is saved on
    KeyboardInterrupt.
    """
    parser = argparse.ArgumentParser(
        # Fixed: description was copy-pasted from the FastSpeech trainer.
        description="Train Tacotron2 (See detail in examples/tacotron2/train_tacotron2.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
    )
    parser.add_argument(
        "--use-fal",
        default=0,
        type=int,
        help="Use forced alignment guided attention loss or regular",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    # Integer CLI flags -> booleans for the rest of the script.
    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)
    args.use_fal = bool(args.use_fal)
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # Fixed: message previously told the user to pass the non-existent
        # flag "--valid-dir".
        raise ValueError("Please specify --dev-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = 0
    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        align_query = "*-alignment.npy" if args.use_fal is True else ""
        charactor_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    train_dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=config["use_fixed_shapes"],
        align_query=align_query,
    )
    # update max_mel_length and max_char_length to config
    config.update({"max_mel_length": int(train_dataset.max_mel_length)})
    config.update({"max_char_length": int(train_dataset.max_char_length)})
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # Global batch covers all replicas and the gradient-accumulation steps.
    train_dataset = train_dataset.create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=False,  # don't need apply fixed shape for evaluation.
        align_query=align_query,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = Tacotron2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )
    with STRATEGY.scope():
        # define model.
        tacotron_config = Tacotron2Config(**config["tacotron2_params"])
        tacotron2 = TFTacotron2(config=tacotron_config, name="tacotron2")
        tacotron2._build()
        tacotron2.summary()
        if len(args.pretrained) > 1:
            tacotron2.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        # AdamW for tacotron2: polynomial decay with a linear warmup phase.
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )
        _ = optimizer.iterations
    # compile trainer
    trainer.compile(model=tacotron2, optimizer=optimizer)
    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
    main()
| 18,412 | 33.807183 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/extract_postnets.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract durations based-on tacotron-2 alignments for FastSpeech."""
import argparse
import logging
import os
from numba import jit
import sys
sys.path.append(".")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
@jit(nopython=True)
def get_duration_from_alignment(alignment):
    """Collapse a (char, frame) attention matrix into per-character durations.

    Each decoder frame (column) is assigned to the character (row) with the
    highest attention weight; a character's duration is the number of frames
    it wins. Ties go to the first (lowest-index) character, matching both
    np.argmax and the original list(...).index(max) lookup.

    Args:
        alignment: 2-D array of attention weights [num_chars, num_frames].
    Returns:
        1-D int64 array of length num_chars whose entries sum to num_frames.
    """
    D = np.zeros(alignment.shape[0], dtype=np.int64)
    for i in range(alignment.shape[1]):
        # np.argmax is a single compiled pass, replacing the original
        # list(...) materialization + .max() + .index() triple pass.
        D[np.argmax(alignment[:, i])] += 1
    return D
def main():
    """Running extract of tacotron-2 postnet mel outputs.

    Loads a trained Tacotron-2 checkpoint, runs teacher-forced inference
    over the dataset and saves each utterance's postnet mel output (and,
    optionally, its alignment plot). Durations are still derived from the
    alignments purely as a sanity check on the attention.
    """
    parser = argparse.ArgumentParser(
        # Fixed: description was copy-pasted from extract_duration.py.
        description="Extract postnet mel outputs from charactor with trained Tacotron-2 "
        "(See detail in tensorflow_tts/example/tacotron-2/extract_postnets.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated mels."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument("--batch-size", default=32, type=int, help="batch size.")
    parser.add_argument("--win-front", default=3, type=int, help="win-front.")
    # Fixed: help text previously said "win-front." for --win-back.
    parser.add_argument("--win-back", default=3, type=int, help="win-back.")
    parser.add_argument(
        "--use-window-mask", default=1, type=int, help="toggle window masking."
    )
    parser.add_argument("--save-alignment", default=0, type=int, help="save-alignment.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    if config["format"] == "npy":
        char_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        char_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")
    # define data-loader
    dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.rootdir,
        charactor_query=char_query,
        mel_query=mel_query,
        charactor_load_fn=char_load_fn,
        mel_load_fn=mel_load_fn,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=True,
    )
    dataset = dataset.create(
        allow_cache=True, batch_size=args.batch_size, drop_remainder=False
    )
    # define model and load checkpoint
    tacotron2 = TFTacotron2(
        config=Tacotron2Config(**config["tacotron2_params"]),
        name="tacotron2",
    )
    tacotron2._build()  # build model to be able load_weights.
    tacotron2.load_weights(args.checkpoint)
    # apply tf.function for tacotron2.
    tacotron2 = tf.function(tacotron2, experimental_relax_shapes=True)
    for data in tqdm(dataset, desc="[Extract Postnets]"):
        utt_ids = data["utt_ids"]
        input_lengths = data["input_lengths"]
        utt_ids = utt_ids.numpy()
        real_mel_lengths = data["real_mel_lengths"]
        # real_mel_lengths is metadata, not a model input.
        del data["real_mel_lengths"]
        # tacotron2 inference (teacher forcing -> training=True).
        mel_outputs, post_mel_outputs, stop_outputs, alignment_historys = tacotron2(
            **data,
            use_window_mask=args.use_window_mask,
            win_front=args.win_front,
            win_back=args.win_back,
            training=True,
        )
        # convert to numpy
        alignment_historys = alignment_historys.numpy()
        post_mel_outputs = post_mel_outputs.numpy()
        outdpost = os.path.join(args.outdir, "postnets")
        if not os.path.exists(outdpost):
            os.makedirs(outdpost)
        for i, alignment in enumerate(alignment_historys):
            real_char_length = input_lengths[i].numpy()
            real_mel_length = real_mel_lengths[i].numpy()
            alignment_mel_length = int(
                np.ceil(
                    real_mel_length / config["tacotron2_params"]["reduction_factor"]
                )
            )
            # Durations below are computed only to validate the alignment.
            alignment = alignment[:real_char_length, :alignment_mel_length]
            d = get_duration_from_alignment(alignment)  # [max_char_len]
            d = d * config["tacotron2_params"]["reduction_factor"]
            assert (
                np.sum(d) >= real_mel_length
            ), f"{d}, {np.sum(d)}, {alignment_mel_length}, {real_mel_length}"
            if np.sum(d) > real_mel_length:
                # Trim surplus frames from the edge characters.
                rest = np.sum(d) - real_mel_length
                if d[-1] > rest:
                    d[-1] -= rest
                elif d[0] > rest:
                    d[0] -= rest
                else:
                    d[-1] -= rest // 2
                    d[0] -= rest - rest // 2
                assert d[-1] >= 0 and d[0] >= 0, f"{d}, {np.sum(d)}, {real_mel_length}"
            saved_name = utt_ids[i].decode("utf-8")
            # check a length compatible
            assert (
                len(d) == real_char_length
            ), f"different between len_char and len_durations, {len(d)} and {real_char_length}"
            assert (
                np.sum(d) == real_mel_length
            ), f"different between sum_durations and len_mel, {np.sum(d)} and {real_mel_length}"
            # save the unpadded postnet mel output to folder.
            np.save(
                os.path.join(outdpost, f"{saved_name}-postnet.npy"),
                post_mel_outputs[i][:real_mel_length].astype(np.float32),
                allow_pickle=False,
            )
            # save alignment to debug.
            if args.save_alignment == 1:
                figname = os.path.join(args.outdir, f"{saved_name}_alignment.png")
                fig = plt.figure(figsize=(8, 6))
                ax = fig.add_subplot(111)
                ax.set_title(f"Alignment of {saved_name}")
                im = ax.imshow(
                    alignment, aspect="auto", origin="lower", interpolation="none"
                )
                fig.colorbar(im, ax=ax)
                xlabel = "Decoder timestep"
                plt.xlabel(xlabel)
                plt.ylabel("Encoder timestep")
                plt.tight_layout()
                plt.savefig(figname)
                plt.close()
if __name__ == "__main__":
    main()
| 8,592 | 33.649194 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/tacotron2/extract_duration.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract durations based-on tacotron-2 alignments for FastSpeech."""
import argparse
import logging
import os
from numba import jit
import sys
sys.path.append(".")
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
from examples.tacotron2.tacotron_dataset import CharactorMelDataset
from tensorflow_tts.configs import Tacotron2Config
from tensorflow_tts.models import TFTacotron2
@jit(nopython=True)
def get_duration_from_alignment(alignment):
    """Collapse a (char, frame) attention matrix into per-character durations.

    Each decoder frame (column) is assigned to the character (row) with the
    highest attention weight; a character's duration is the number of frames
    it wins. Ties go to the first (lowest-index) character, matching both
    np.argmax and the original list(...).index(max) lookup.

    Args:
        alignment: 2-D array of attention weights [num_chars, num_frames].
    Returns:
        1-D int64 array of length num_chars whose entries sum to num_frames.
    """
    D = np.zeros(alignment.shape[0], dtype=np.int64)
    for i in range(alignment.shape[1]):
        # np.argmax is a single compiled pass, replacing the original
        # list(...) materialization + .max() + .index() triple pass.
        D[np.argmax(alignment[:, i])] += 1
    return D
def main():
    """Running extract tacotron-2 durations.

    Loads a trained Tacotron-2 checkpoint, runs teacher-forced inference over
    the dataset, converts each utterance's attention alignment into
    per-character durations and saves them as <utt_id>-durations.npy.
    """
    parser = argparse.ArgumentParser(
        description="Extract durations from charactor with trained Tacotron-2 "
        "(See detail in tensorflow_tts/example/tacotron-2/extract_duration.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument("--batch-size", default=8, type=int, help="batch size.")
    parser.add_argument("--win-front", default=2, type=int, help="win-front.")
    # Fixed: help text previously said "win-front." for --win-back.
    parser.add_argument("--win-back", default=2, type=int, help="win-back.")
    parser.add_argument(
        "--use-window-mask", default=1, type=int, help="toggle window masking."
    )
    parser.add_argument("--save-alignment", default=0, type=int, help="save-alignment.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    if config["format"] == "npy":
        char_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        char_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")
    # define data-loader
    dataset = CharactorMelDataset(
        dataset=config["tacotron2_params"]["dataset"],
        root_dir=args.rootdir,
        charactor_query=char_query,
        mel_query=mel_query,
        charactor_load_fn=char_load_fn,
        mel_load_fn=mel_load_fn,
        reduction_factor=config["tacotron2_params"]["reduction_factor"],
        use_fixed_shapes=True,
    )
    dataset = dataset.create(allow_cache=True, batch_size=args.batch_size, drop_remainder=False)
    # define model and load checkpoint
    tacotron2 = TFTacotron2(
        config=Tacotron2Config(**config["tacotron2_params"]),
        name="tacotron2",
    )
    tacotron2._build()  # build model to be able load_weights.
    tacotron2.load_weights(args.checkpoint)
    # apply tf.function for tacotron2.
    tacotron2 = tf.function(tacotron2, experimental_relax_shapes=True)
    for data in tqdm(dataset, desc="[Extract Duration]"):
        utt_ids = data["utt_ids"]
        input_lengths = data["input_lengths"]
        utt_ids = utt_ids.numpy()
        real_mel_lengths = data["real_mel_lengths"]
        # real_mel_lengths is metadata, not a model input.
        del data["real_mel_lengths"]
        # tacotron2 inference (teacher forcing -> training=True).
        mel_outputs, post_mel_outputs, stop_outputs, alignment_historys = tacotron2(
            **data,
            use_window_mask=args.use_window_mask,
            win_front=args.win_front,
            win_back=args.win_back,
            training=True,
        )
        # convert to numpy
        alignment_historys = alignment_historys.numpy()
        for i, alignment in enumerate(alignment_historys):
            real_char_length = input_lengths[i].numpy()
            real_mel_length = real_mel_lengths[i].numpy()
            alignment_mel_length = int(
                np.ceil(
                    real_mel_length / config["tacotron2_params"]["reduction_factor"]
                )
            )
            alignment = alignment[:real_char_length, :alignment_mel_length]
            d = get_duration_from_alignment(alignment)  # [max_char_len]
            d = d * config["tacotron2_params"]["reduction_factor"]
            assert (
                np.sum(d) >= real_mel_length
            ), f"{d}, {np.sum(d)}, {alignment_mel_length}, {real_mel_length}"
            if np.sum(d) > real_mel_length:
                # Trim surplus frames from the edge characters so durations
                # sum exactly to the real mel length.
                rest = np.sum(d) - real_mel_length
                if d[-1] > rest:
                    d[-1] -= rest
                elif d[0] > rest:
                    d[0] -= rest
                else:
                    d[-1] -= rest // 2
                    d[0] -= rest - rest // 2
                assert d[-1] >= 0 and d[0] >= 0, f"{d}, {np.sum(d)}, {real_mel_length}"
            saved_name = utt_ids[i].decode("utf-8")
            # check a length compatible
            assert (
                len(d) == real_char_length
            ), f"different between len_char and len_durations, {len(d)} and {real_char_length}"
            assert (
                np.sum(d) == real_mel_length
            ), f"different between sum_durations and len_mel, {np.sum(d)} and {real_mel_length}"
            # save D to folder.
            np.save(
                os.path.join(args.outdir, f"{saved_name}-durations.npy"),
                d.astype(np.int32),
                allow_pickle=False,
            )
            # save alignment to debug.
            if args.save_alignment == 1:
                figname = os.path.join(args.outdir, f"{saved_name}_alignment.png")
                fig = plt.figure(figsize=(8, 6))
                ax = fig.add_subplot(111)
                ax.set_title(f"Alignment of {saved_name}")
                im = ax.imshow(
                    alignment, aspect="auto", origin="lower", interpolation="none"
                )
                fig.colorbar(im, ax=ax)
                xlabel = "Decoder timestep"
                plt.xlabel(xlabel)
                plt.ylabel("Encoder timestep")
                plt.tight_layout()
                plt.savefig(figname)
                plt.close()
if __name__ == "__main__":
    main()
| 8,291 | 33.987342 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/multiband_melgan_hf/train_multiband_melgan_hf.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Multi-Band MelGAN + MPD."""
import tensorflow as tf
# Enable on-demand GPU memory allocation on every visible GPU so TensorFlow
# does not reserve all device memory at import time.
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
    tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tensorflow.keras.mixed_precision import experimental as mixed_precision
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.hifigan.train_hifigan import TFHifiGANDiscriminator
from examples.melgan.train_melgan import MelganTrainer, collater
from tensorflow_tts.configs import (
MultiBandMelGANDiscriminatorConfig,
MultiBandMelGANGeneratorConfig,
HifiGANDiscriminatorConfig,
)
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.models import (
TFPQMF,
TFMelGANGenerator,
TFMelGANMultiScaleDiscriminator,
TFHifiGANMultiPeriodDiscriminator,
)
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MultiBandMelganTrainer(MelganTrainer):
    """Multi-Band MelGAN + MPD trainer based on MelganTrainer.

    The generator predicts sub-band waveforms; a PQMF filter bank combines
    them into the full-band waveform. On top of the adversarial MelGAN
    training loop this adds multi-resolution STFT losses on both the
    sub-band and full-band signals.
    """

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy used for training.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.

        """
        super(MultiBandMelganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )

        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "adversarial_loss",
            "subband_spectral_convergence_loss",
            "subband_log_magnitude_loss",
            "fullband_spectral_convergence_loss",
            "fullband_log_magnitude_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
        ]

        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer, pqmf):
        """Attach models/optimizers, build STFT losses and keep the PQMF module."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.sub_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["subband_stft_loss_params"]
        )
        self.full_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["stft_loss_params"]
        )
        # define pqmf module
        self.pqmf = pqmf

    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.

        """
        dict_metrics_losses = {}

        audios = batch["audios"]
        y_mb_hat = outputs
        # combine predicted sub-bands into the full-band waveform.
        y_hat = self.pqmf.synthesis(y_mb_hat)

        # split ground-truth audio into sub-bands for the sub-band STFT loss.
        y_mb = self.pqmf.analysis(tf.expand_dims(audios, -1))
        y_mb = tf.transpose(y_mb, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1]))  # [B * subbands, T']

        y_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb_hat = tf.reshape(
            y_mb_hat, (-1, tf.shape(y_mb_hat)[-1])
        )  # [B * subbands, T']

        # calculate sub/full band spectral_convergence and log mag loss.
        sub_sc_loss, sub_mag_loss = calculate_2d_loss(
            y_mb, y_mb_hat, self.sub_band_stft_loss
        )
        # fold the [B * subbands] losses back to per-example shape [B].
        sub_sc_loss = tf.reduce_mean(
            tf.reshape(sub_sc_loss, [-1, self.pqmf.subbands]), -1
        )
        sub_mag_loss = tf.reduce_mean(
            tf.reshape(sub_mag_loss, [-1, self.pqmf.subbands]), -1
        )
        full_sc_loss, full_mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.full_band_stft_loss
        )

        # define generator loss
        gen_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (
            full_sc_loss + full_mag_loss
        )

        # adversarial term only after the discriminator warm-up step count.
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            # NOTE(review): the discriminator output on real audio was computed
            # here but never used by the generator loss, so it is dropped.
            adv_loss = 0.0
            for disc_outputs in p_hat:
                # last element of each sub-discriminator output holds its logits.
                adv_loss += calculate_3d_loss(
                    tf.ones_like(disc_outputs[-1]),
                    disc_outputs[-1],
                    loss_fn=self.mse_loss,
                )
            # average over sub-discriminators; use len(p_hat) instead of the
            # loop variable leaking out of the loop (which would be a NameError
            # for an empty output list and is fragile to refactoring).
            adv_loss /= len(p_hat)
            gen_loss += self.config["lambda_adv"] * adv_loss

            dict_metrics_losses.update(
                {"adversarial_loss": adv_loss},
            )

        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"subband_spectral_convergence_loss": sub_sc_loss})
        dict_metrics_losses.update({"subband_log_magnitude_loss": sub_mag_loss})
        dict_metrics_losses.update({"fullband_spectral_convergence_loss": full_sc_loss})
        dict_metrics_losses.update({"fullband_log_magnitude_loss": full_mag_loss})

        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses

    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per example discriminator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            gen_outputs: generator sub-band outputs

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.

        """
        y_mb_hat = gen_outputs
        # discriminate on the synthesized full-band waveform, then delegate
        # to the base MelGAN discriminator loss.
        y_hat = self.pqmf.synthesis(y_mb_hat)

        (
            per_example_losses,
            dict_metrics_losses,
        ) = super().compute_per_example_discriminator_losses(batch, y_hat)

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate waveforms for one batch and save plots and wavs for inspection."""
        import matplotlib.pyplot as plt

        # predict with tf.function.
        y_mb_batch_ = self.one_step_predict(batch)  # [B, T // subbands, subbands]
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]

        # under a multi-device strategy the tensors are PerReplica values;
        # take the sample from the first replica, otherwise use them directly.
        try:
            y_mb_batch_ = y_mb_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            y_mb_batch_ = y_mb_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()

        y_batch_ = self.pqmf.synthesis(y_mb_batch_).numpy()  # [B, T, 1]

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()

            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()

            # clip to the valid [-1, 1] range before writing 16-bit PCM wavs.
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def main():
    """Run the Multi-Band MelGAN + MPD training process.

    Parses CLI arguments, builds train/valid datasets, constructs the
    generator, the multi-scale + multi-period discriminator and the PQMF
    module under the distribution strategy, then runs the GAN training loop.
    """
    parser = argparse.ArgumentParser(
        description="Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--postnets",
        default=0,
        type=int,
        help="using postnets instead of gt mels or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 mb-melgan generator and discriminator to load weights from. must be comma delineated, like ptgen.h5,ptdisc.h5",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)

    args.use_norm = bool(args.use_norm)
    args.postnets = bool(args.postnets)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # BUGFIX: the message used to say "--valid-dir", which is not a flag
        # this script defines; name the actual flag so users can act on it.
        raise ValueError("Please specify --dev-dir")

    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["multiband_melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        # args.use_norm is already a bool here; test it directly instead of
        # the identity comparison "is False".
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    if args.postnets:
        # train the vocoder on teacher-model postnet outputs instead of
        # ground-truth mels.
        mel_query = "*-postnet.npy"
        logging.info("Using postnets")
    else:
        logging.info("Using GT Mels")

    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch: per-replica batch * replicas * accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = MultiBandMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFMelGANGenerator(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            name="multi_band_melgan_generator",
        )

        multiscale_discriminator = TFMelGANMultiScaleDiscriminator(
            MultiBandMelGANDiscriminatorConfig(
                **config["multiband_melgan_discriminator_params"]
            ),
            name="multi_band_melgan_discriminator",
        )

        multiperiod_discriminator = TFHifiGANMultiPeriodDiscriminator(
            HifiGANDiscriminatorConfig(**config["hifigan_discriminator_params"]),
            name="hifigan_multiperiod_discriminator",
        )

        pqmf = TFPQMF(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            dtype=tf.float32,
            name="pqmf",
        )

        # wrap both discriminators into the combined HiFi-GAN discriminator.
        discriminator = TFHifiGANDiscriminator(
            multiperiod_discriminator,
            multiscale_discriminator,
            name="hifigan_discriminator",
        )

        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_mb_hat = generator(fake_mels)
        y_hat = pqmf.synthesis(y_mb_hat)
        discriminator(y_hat)

        # ROBUSTNESS: truthiness test instead of len(...) > 1; with
        # nargs="?" a bare --pretrained yields None, on which len() raises.
        if args.pretrained:
            # expects "generator.h5,discriminator.h5".
            pt_splits = args.pretrained.split(",")
            generator.load_weights(pt_splits[0])
            discriminator.load_weights(pt_splits[1])
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        generator.summary()
        discriminator.summary()

        # define optimizer
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])

        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn,
            amsgrad=config["generator_optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn,
            amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
        )

        # touch .iterations so the optimizer slots exist before checkpointing.
        _ = gen_optimizer.iterations
        _ = dis_optimizer.iterations

    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
        pqmf=pqmf,
    )

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        # save progress on Ctrl-C so a long run is not lost.
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
main()
| 19,162 | 33.40395 | 137 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/multiband_melgan_hf/decode_mb_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained Mb-Melgan from folder."""
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
from tensorflow_tts.configs import MultiBandMelGANGeneratorConfig
from tensorflow_tts.datasets import MelDataset
from tensorflow_tts.models import TFPQMF, TFMelGANGenerator
def main():
    """Run Multi-Band MelGAN decoding from a folder of dumped mel features.

    Loads a trained generator checkpoint, runs it over every mel file found
    under --rootdir, combines the predicted sub-bands with a PQMF synthesis
    filter and writes one 16-bit PCM wav per utterance into --outdir.
    """
    parser = argparse.ArgumentParser(
        description="Generate Audio from melspectrogram with trained melgan "
        "(See detail in example/melgan/decode_melgan.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", type=int, default=1, help="Use norm or raw melspectrogram."
    )
    parser.add_argument("--batch-size", type=int, default=8, help="batch_size.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config; CLI args override values from the yaml file.
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    # pick the file-name pattern: FastSpeech-generated mels if the rootdir
    # looks like a fastspeech dump, otherwise normalized or raw mels.
    if config["format"] == "npy":
        mel_query = "*-fs-after-feats.npy" if "fastspeech" in args.rootdir else "*-norm-feats.npy" if args.use_norm == 1 else "*-raw-feats.npy"
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define data-loader
    dataset = MelDataset(
        root_dir=args.rootdir,
        mel_query=mel_query,
        mel_load_fn=mel_load_fn,
    )
    dataset = dataset.create(batch_size=args.batch_size)

    # define model, build its variables, then load checkpoint weights.
    mb_melgan = TFMelGANGenerator(
        config=MultiBandMelGANGeneratorConfig(**config["multiband_melgan_generator_params"]),
        name="multiband_melgan_generator",
    )
    mb_melgan._build()
    mb_melgan.load_weights(args.checkpoint)

    # PQMF synthesis filter combining sub-band outputs into full-band audio.
    pqmf = TFPQMF(
        config=MultiBandMelGANGeneratorConfig(**config["multiband_melgan_generator_params"]), name="pqmf"
    )

    for data in tqdm(dataset, desc="[Decoding]"):
        utt_ids, mels, mel_lengths = data["utt_ids"], data["mels"], data["mel_lengths"]

        # melgan inference.
        generated_subbands = mb_melgan(mels)
        generated_audios = pqmf.synthesis(generated_subbands)

        # convert to numpy.
        generated_audios = generated_audios.numpy()  # [B, T]

        # save to outdir, trimming batch padding to each utterance's true
        # length (mel frames * hop_size samples).
        for i, audio in enumerate(generated_audios):
            utt_id = utt_ids[i].numpy().decode("utf-8")
            sf.write(
                os.path.join(args.outdir, f"{utt_id}.wav"),
                audio[: mel_lengths[i].numpy() * config["hop_size"]],
                config["sampling_rate"],
                "PCM_16",
            )
if __name__ == "__main__":
main()
| 4,752 | 31.554795 | 143 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech/train_fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import yaml
import tensorflow_tts
import tensorflow_tts.configs.fastspeech as FASTSPEECH_CONFIG
from examples.fastspeech.fastspeech_dataset import CharactorDurationMelDataset
from tensorflow_tts.models import TFFastSpeech
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class FastSpeechTrainer(Seq2SeqBasedTrainer):
    """FastSpeech Trainer class based on Seq2SeqBasedTrainer.

    Trains with three loss terms: a duration-predictor MSE in log domain and
    mel-spectrogram MAE both before and after the postnet.
    """

    def __init__(
        self, config, strategy, steps=0, epochs=0, is_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy used for training.
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            is_mixed_precision (bool): Use mixed precision or not.

        """
        super(FastSpeechTrainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = ["duration_loss", "mel_loss_before", "mel_loss_after"]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

        self.config = config

    def compile(self, model, optimizer):
        """Attach model/optimizer and build unreduced per-example loss functions."""
        super().compile(model, optimizer)
        # Reduction.NONE keeps a per-example loss vector so the distributed
        # trainer can perform the cross-replica reduction itself.
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )

    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.

        """
        mel_before, mel_after, duration_outputs = outputs

        # add 1 before the log so zero-length ground-truth durations do not
        # produce log(0); the duration predictor is trained in log domain.
        log_duration = tf.math.log(
            tf.cast(tf.math.add(batch["duration_gts"], 1), tf.float32)
        )
        duration_loss = self.mse(log_duration, duration_outputs)
        mel_loss_before = calculate_3d_loss(batch["mel_gts"], mel_before, self.mae)
        mel_loss_after = calculate_3d_loss(batch["mel_gts"], mel_after, self.mae)

        per_example_losses = duration_loss + mel_loss_before + mel_loss_after

        dict_metrics_losses = {
            "duration_loss": duration_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
        }

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result (target vs predicted mel plots)."""
        import matplotlib.pyplot as plt

        # predict with tf.function.
        outputs = self.one_step_predict(batch)

        mels_before, mels_after, *_ = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]

        # convert to tensor.
        # here we just take a sample at first replica.
        # (under a multi-device strategy these are PerReplica values, hence
        # the .values[0] access; the except branch covers single-device runs.)
        try:
            mels_before = mels_before.values[0].numpy()
            mels_after = mels_after.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            mels_before = mels_before.numpy()
            mels_after = mels_after.numpy()
            mel_gts = mel_gts.numpy()
            utt_ids = utt_ids.numpy()

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (mel_gt, mel_before, mel_after) in enumerate(
            zip(mel_gts, mels_before, mels_after), 0
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]

            # plot figure and save it
            utt_id = utt_ids[idx].decode("utf-8")
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run the FastSpeech training process.

    Parses CLI arguments, builds train/valid datasets of
    (charactor, duration, mel) triples, constructs the FastSpeech model and
    the AdamW optimizer with warm-up under the distribution strategy, then
    runs the training loop.
    """
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained checkpoint file to load weights from. Auto-skips non-matching layers",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # BUGFIX: the message used to say "--valid-dir", which is not a flag
        # this script defines; name the actual flag so users can act on it.
        raise ValueError("Please specify --dev-dir")

    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        # args.use_norm is already a bool here; test it directly instead of
        # the identity comparison "is False".
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        duration_query = "*-durations.npy"
        charactor_load_fn = np.load
        mel_load_fn = np.load
        duration_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset
    train_dataset = CharactorDurationMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        duration_load_fn=duration_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        # global batch: per-replica batch * replicas * accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = CharactorDurationMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        charactor_load_fn=charactor_load_fn,
        mel_load_fn=mel_load_fn,
        duration_load_fn=duration_load_fn,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = FastSpeechTrainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )

    with STRATEGY.scope():
        # define model
        fastspeech = TFFastSpeech(
            config=FASTSPEECH_CONFIG.FastSpeechConfig(**config["fastspeech_params"])
        )
        fastspeech._build()
        fastspeech.summary()

        # ROBUSTNESS: truthiness test instead of len(...) > 1; with
        # nargs="?" a bare --pretrained yields None, on which len() raises.
        if args.pretrained:
            fastspeech.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        # AdamW for fastspeech: polynomial decay wrapped in a warm-up schedule.
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )

        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )

        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            # norm/bias parameters conventionally excluded from weight decay.
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )

        # touch .iterations so the optimizer slots exist before checkpointing.
        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=fastspeech, optimizer=optimizer)

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        # save progress on Ctrl-C so a long run is not lost.
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
main()
| 13,591 | 33.762148 | 95 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech/decode_fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained FastSpeech from folders."""
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
from examples.fastspeech.fastspeech_dataset import CharactorDataset
from tensorflow_tts.configs import FastSpeechConfig
from tensorflow_tts.models import TFFastSpeech
def main():
    """Run FastSpeech decoding from a folder of dumped charactor-id files.

    Loads a trained FastSpeech checkpoint, predicts mel-spectrograms (before
    and after the postnet) for every utterance under --rootdir and saves them
    as .npy files into --outdir for a downstream vocoder.
    """
    parser = argparse.ArgumentParser(
        description="Decode soft-mel features from charactor with trained FastSpeech "
        "(See detail in examples/fastspeech/decode_fastspeech.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--batch-size",
        default=8,
        type=int,
        required=False,
        help="Batch size for inference.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config; CLI args override values from the yaml file.
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] == "npy":
        char_query = "*-ids.npy"
        char_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define data-loader
    dataset = CharactorDataset(
        root_dir=args.rootdir,
        charactor_query=char_query,
        charactor_load_fn=char_load_fn,
    )
    dataset = dataset.create(batch_size=args.batch_size)

    # define model, build its variables, then load checkpoint weights.
    fastspeech = TFFastSpeech(
        config=FastSpeechConfig(**config["fastspeech_params"]), name="fastspeech"
    )
    fastspeech._build()
    fastspeech.load_weights(args.checkpoint)

    for data in tqdm(dataset, desc="Decoding"):
        utt_ids = data["utt_ids"]
        char_ids = data["input_ids"]

        # fastspeech inference with speaker id 0 and normal speed for every
        # utterance in the batch.
        masked_mel_before, masked_mel_after, duration_outputs = fastspeech.inference(
            char_ids,
            speaker_ids=tf.zeros(shape=[tf.shape(char_ids)[0]], dtype=tf.int32),
            speed_ratios=tf.ones(shape=[tf.shape(char_ids)[0]], dtype=tf.float32),
        )

        # convert to numpy
        masked_mel_befores = masked_mel_before.numpy()
        masked_mel_afters = masked_mel_after.numpy()

        for (utt_id, mel_before, mel_after, durations) in zip(
            utt_ids, masked_mel_befores, masked_mel_afters, duration_outputs
        ):
            # real len of mel predicted: the summed predicted durations give
            # the number of valid frames; the rest is batch padding.
            real_length = durations.numpy().sum()
            utt_id = utt_id.numpy().decode("utf-8")
            # save to folder, trimmed to the real length.
            np.save(
                os.path.join(args.outdir, f"{utt_id}-fs-before-feats.npy"),
                mel_before[:real_length, :].astype(np.float32),
                allow_pickle=False,
            )
            np.save(
                os.path.join(args.outdir, f"{utt_id}-fs-after-feats.npy"),
                mel_after[:real_length, :].astype(np.float32),
                allow_pickle=False,
            )
if __name__ == "__main__":
main()
| 5,070 | 30.69375 | 87 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech/fastspeech_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset modules."""
import itertools
import logging
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
class CharactorDurationMelDataset(AbstractDataset):
    """Tensorflow Charactor Mel dataset.

    Pairs charactor-id, duration, and mel files (matched by sorted filename)
    and exposes them as a padded, batched tf.data pipeline via ``create()``.
    """
    def __init__(
        self,
        root_dir,
        charactor_query="*-ids.npy",
        mel_query="*-norm-feats.npy",
        duration_query="*-durations.npy",
        charactor_load_fn=np.load,
        mel_load_fn=np.load,
        duration_load_fn=np.load,
        mel_length_threshold=0,
    ):
        """Initialize dataset.
        Args:
            root_dir (str): Root directory including dumped files.
            charactor_query (str): Query to find charactor files in root_dir.
            mel_query (str): Query to find feature files in root_dir.
            duration_query (str): Query to find duration files in root_dir.
            charactor_load_fn (func): Function to load charactor file.
            mel_load_fn (func): Function to load feature file.
            duration_load_fn (func): Function to load duration file.
            mel_length_threshold (int): Threshold to remove short feature files.
            return_utt_id (bool): Whether to return the utterance id with arrays.
        """
        # find all of charactor and mel files.
        charactor_files = sorted(find_files(root_dir, charactor_query))
        mel_files = sorted(find_files(root_dir, mel_query))
        duration_files = sorted(find_files(root_dir, duration_query))
        # assert the number of files
        assert len(mel_files) != 0, f"Not found any mels files in ${root_dir}."
        assert (
            len(mel_files) == len(charactor_files) == len(duration_files)
        ), f"Number of charactor, mel and duration files are different \
            ({len(mel_files)} vs {len(charactor_files)} vs {len(duration_files)})."
        # Utterance ids are file basenames with the query suffix stripped,
        # e.g. "LJ001-0001-ids.npy" -> "LJ001-0001".
        if ".npy" in charactor_query:
            suffix = charactor_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in charactor_files]
        # NOTE(review): utt_ids is only bound when the query contains ".npy";
        # any other query string would raise NameError on the next line.
        # set global params
        self.utt_ids = utt_ids
        self.mel_files = mel_files
        self.charactor_files = charactor_files
        self.duration_files = duration_files
        self.mel_load_fn = mel_load_fn
        self.charactor_load_fn = charactor_load_fn
        self.duration_load_fn = duration_load_fn
        self.mel_length_threshold = mel_length_threshold
    def get_args(self):
        # Arguments forwarded to generator() by tf.data.Dataset.from_generator.
        return [self.utt_ids]
    def generator(self, utt_ids):
        # Yields only file paths; the actual arrays are loaded lazily in
        # _load_data inside the tf.data pipeline.
        for i, utt_id in enumerate(utt_ids):
            mel_file = self.mel_files[i]
            charactor_file = self.charactor_files[i]
            duration_file = self.duration_files[i]
            items = {
                "utt_ids": utt_id,
                "mel_files": mel_file,
                "charactor_files": charactor_file,
                "duration_files": duration_file,
            }
            yield items
    @tf.function
    def _load_data(self, items):
        # Load the numpy arrays inside the graph via tf.numpy_function.
        # NOTE(review): this calls np.load directly and ignores the *_load_fn
        # hooks stored in __init__.
        mel = tf.numpy_function(np.load, [items["mel_files"]], tf.float32)
        charactor = tf.numpy_function(np.load, [items["charactor_files"]], tf.int32)
        duration = tf.numpy_function(np.load, [items["duration_files"]], tf.int32)
        items = {
            "utt_ids": items["utt_ids"],
            "input_ids": charactor,
            "speaker_ids": 0,
            "duration_gts": duration,
            "mel_gts": mel,
            "mel_lengths": len(mel),
        }
        return items
    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function."""
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )
        # load data
        datasets = datasets.map(
            lambda items: self._load_data(items), tf.data.experimental.AUTOTUNE
        )
        # Drop utterances whose mel is not longer than the configured threshold.
        datasets = datasets.filter(
            lambda x: x["mel_lengths"] > self.mel_length_threshold
        )
        if allow_cache:
            datasets = datasets.cache()
        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )
        # define padded_shapes
        padded_shapes = {
            "utt_ids": [],
            "input_ids": [None],
            "speaker_ids": [],
            "duration_gts": [None],
            "mel_gts": [None, None],
            "mel_lengths": [],
        }
        datasets = datasets.padded_batch(batch_size, padded_shapes=padded_shapes)
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets
    def get_output_dtypes(self):
        # Dtypes of the items yielded by generator() (paths as strings;
        # array loading happens later in _load_data).
        output_types = {
            "utt_ids": tf.string,
            "mel_files": tf.string,
            "charactor_files": tf.string,
            "duration_files": tf.string,
        }
        return output_types
    def get_len_dataset(self):
        # Number of utterances found on disk.
        return len(self.utt_ids)
    def __name__(self):
        # NOTE(review): defined as a method, shadowing the usual __name__
        # string attribute; callers must invoke it as a method.
        return "CharactorDurationMelDataset"
class CharactorDataset(AbstractDataset):
    """Tensorflow Charactor dataset.

    Inference-time dataset: yields only utterance ids and charactor id
    arrays (no mel/duration targets), padded and batched via ``create()``.
    """
    def __init__(
        self, root_dir, charactor_query="*-ids.npy", charactor_load_fn=np.load,
    ):
        """Initialize dataset.
        Args:
            root_dir (str): Root directory including dumped files.
            charactor_query (str): Query to find charactor files in root_dir.
            charactor_load_fn (func): Function to load charactor file.
            return_utt_id (bool): Whether to return the utterance id with arrays.
        """
        # find all of charactor and mel files.
        charactor_files = sorted(find_files(root_dir, charactor_query))
        # assert the number of files
        assert (
            len(charactor_files) != 0
        ), f"Not found any char or duration files in ${root_dir}."
        # Utterance ids are file basenames with the query suffix stripped.
        if ".npy" in charactor_query:
            suffix = charactor_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in charactor_files]
        # NOTE(review): utt_ids is only bound for ".npy" queries; any other
        # query string would raise NameError on the next line.
        # set global params
        self.utt_ids = utt_ids
        self.charactor_files = charactor_files
        self.charactor_load_fn = charactor_load_fn
    def get_args(self):
        # Arguments forwarded to generator() by tf.data.Dataset.from_generator.
        return [self.utt_ids]
    def generator(self, utt_ids):
        # Loads the charactor ids eagerly (unlike CharactorDurationMelDataset,
        # there is no separate graph-side _load_data step here).
        for i, utt_id in enumerate(utt_ids):
            charactor_file = self.charactor_files[i]
            charactor = self.charactor_load_fn(charactor_file)
            items = {"utt_ids": utt_id, "input_ids": charactor}
            yield items
    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function."""
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )
        if allow_cache:
            datasets = datasets.cache()
        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )
        # define padded shapes
        padded_shapes = {"utt_ids": [], "input_ids": [None]}
        # drop_remainder=True keeps every batch at exactly batch_size
        # utterances (a trailing partial batch is discarded).
        datasets = datasets.padded_batch(
            batch_size, padded_shapes=padded_shapes, drop_remainder=True
        )
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets
    def get_output_dtypes(self):
        output_types = {"utt_ids": tf.string, "input_ids": tf.int32}
        return output_types
    def get_len_dataset(self):
        # Number of utterances found on disk.
        return len(self.utt_ids)
    def __name__(self):
        # NOTE(review): method shadows the usual __name__ string attribute.
        return "CharactorDataset"
| 8,659 | 31.80303 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/mfa_extraction/run_mfa.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runing mfa to extract textgrids."""
from subprocess import call
from pathlib import Path
import click
import os
@click.command()
@click.option("--mfa_path", default=os.path.join('mfa', 'montreal-forced-aligner', 'bin', 'mfa_align'))
@click.option("--corpus_directory", default="libritts")
@click.option("--lexicon", default=os.path.join('mfa', 'lexicon', 'librispeech-lexicon.txt'))
@click.option("--acoustic_model_path", default=os.path.join('mfa', 'montreal-forced-aligner', 'pretrained_models', 'english.zip'))
@click.option("--output_directory", default=os.path.join('mfa', 'parsed'))
@click.option("--jobs", default="8")
def run_mfa(
    mfa_path: str,
    corpus_directory: str,
    lexicon: str,
    acoustic_model_path: str,
    output_directory: str,
    jobs: str,
):
    """Run the Montreal Forced Aligner binary to extract TextGrid alignments.

    Creates ``output_directory`` if needed, then invokes
    ``./<mfa_path> <corpus> <lexicon> <acoustic_model> <output_dir> -j <jobs>``
    as a subprocess and blocks until it finishes.
    """
    Path(output_directory).mkdir(parents=True, exist_ok=True)
    call(
        [
            # Prefix with "./" so the binary is resolved relative to the cwd.
            f".{os.path.sep}{mfa_path}",
            corpus_directory,
            lexicon,
            acoustic_model_path,
            output_directory,
            # NOTE(review): "-j {jobs}" is passed as a single argv token that
            # contains a space (e.g. "-j 8"); the conventional split would be
            # two tokens ["-j", jobs] — confirm mfa_align accepts this form.
            f"-j {jobs}"
        ]
    )
if __name__ == "__main__":
    run_mfa()
| 1,714 | 30.759259 | 130 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/mfa_extraction/fix_mismatch.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fix mismatch between sum durations and mel lengths."""
import numpy as np
import os
from tqdm import tqdm
import click
import logging
import sys
# Log everything (DEBUG and up) to stdout with a timestamped single-line format.
logging.basicConfig(
    level=logging.DEBUG,
    stream=sys.stdout,
    format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@click.command()
@click.option("--base_path", default="dump")
@click.option("--trimmed_dur_path", default="dataset/trimmed-durations")
@click.option("--dur_path", default="dataset/durations")
@click.option("--use_norm", default="f")
def fix(base_path: str, dur_path: str, trimmed_dur_path: str, use_norm: str):
    """Fix mismatches between summed MFA durations and mel lengths.

    For every utterance in dump/{train,valid}, loads the mel spectrogram and
    the duration sequence, then adjusts the durations in place so that
    ``sum(durations) == len(mel)``: surplus frames are stripped from the tail
    tokens, missing frames are added to the last token. Fixed durations are
    saved under ``<base_path>/<split>/fix_dur`` and per-split statistics are
    logged.
    """
    for t in ["train", "valid"]:
        mfa_longer = []   # per-utterance surplus (durations too long)
        mfa_shorter = []  # per-utterance deficit (durations too short)
        big_diff = []     # utterances with a suspiciously large mismatch
        not_fixed = []    # utterances whose surplus could not be absorbed
        pre_path = os.path.join(base_path, t)
        os.makedirs(os.path.join(pre_path, "fix_dur"), exist_ok=True)
        logging.info(f"FIXING {t} set ...\n")
        for i in tqdm(os.listdir(os.path.join(pre_path, "ids"))):
            utt_id = i.split("-")[0]
            if use_norm == "t":
                mel = np.load(
                    os.path.join(pre_path, "norm-feats", f"{utt_id}-norm-feats.npy")
                )
            else:
                mel = np.load(
                    os.path.join(pre_path, "raw-feats", f"{utt_id}-raw-feats.npy")
                )
            # Prefer trimmed durations; fall back to the raw ones when the
            # trimmed file does not exist. Catch only the missing-file error
            # (a bare ``except`` would also hide real bugs and Ctrl-C).
            try:
                dur = np.load(
                    os.path.join(trimmed_dur_path, f"{utt_id}-durations.npy")
                )
            except FileNotFoundError:
                dur = np.load(
                    os.path.join(dur_path, f"{utt_id}-durations.npy")
                )
            l_mel = len(mel)
            dur_s = np.sum(dur)
            cloned = np.array(dur, copy=True)
            diff = abs(l_mel - dur_s)
            if diff > 30:  # more than 300 ms
                big_diff.append([i, diff])
            if dur_s > l_mel:
                # Durations too long: walk the tail tokens (excluding the very
                # first and last), zeroing them until the surplus is absorbed.
                remaining = diff
                for j in range(1, len(dur) - 1):
                    if remaining == 0:
                        break
                    dur_val = cloned[-j]
                    if dur_val >= remaining:
                        cloned[-j] -= remaining
                        remaining = 0
                        break
                    else:
                        cloned[-j] = 0
                        remaining -= dur_val
                    if j == len(dur) - 2:
                        # Ran out of adjustable tokens before absorbing it all.
                        not_fixed.append(i)
                mfa_longer.append(diff)
            elif dur_s < l_mel:
                # Durations too short: give all missing frames to the last token.
                cloned[-1] += diff
                mfa_shorter.append(diff)
            np.save(
                os.path.join(pre_path, "fix_dur", f"{utt_id}-durations.npy"),
                cloned.astype(np.int32),
                allow_pickle=False,
            )
        logging.info(
            f"{t} stats: number of mfa with longer duration: {len(mfa_longer)}, total diff: {sum(mfa_longer)}"
            f", mean diff: {sum(mfa_longer)/len(mfa_longer) if len(mfa_longer) > 0 else 0}"
        )
        logging.info(
            f"{t} stats: number of mfa with shorter duration: {len(mfa_shorter)}, total diff: {sum(mfa_shorter)}"
            f", mean diff: {sum(mfa_shorter)/len(mfa_shorter) if len(mfa_shorter) > 0 else 0}"
        )
        logging.info(
            f"{t} stats: number of files with a ''big'' duration diff: {len(big_diff)} if number>1 you should check it"
        )
        logging.info(f"{t} stats: not fixed len: {len(not_fixed)}\n")
if __name__ == "__main__":
    fix()
| 4,213 | 33.540984 | 119 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/mfa_extraction/txt_grid_parser.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create training file and durations from textgrids."""
import os
from dataclasses import dataclass
from pathlib import Path
import click
import numpy as np
import textgrid
import yaml
from tqdm import tqdm
import logging
import sys
# Log everything (DEBUG and up) to stdout with a timestamped single-line format.
logging.basicConfig(
    level=logging.DEBUG,
    stream=sys.stdout,
    format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
)
@dataclass
class TxtGridParser:
    """Parse MFA TextGrid alignments into per-utterance duration arrays and
    a training index file of ``name|phonemes|speaker`` lines."""
    sample_rate: int
    multi_speaker: bool
    txt_grid_path: str
    hop_size: int
    output_durations_path: str
    dataset_path: str
    training_file: str = "train.txt"
    # Map MFA silence/unknown marks to "SIL" and the trailing empty mark to "END".
    phones_mapper = {"sil": "SIL", "sp": "SIL", "spn": "SIL", "": "END"}
    """ '' -> is last token in every cases i encounter so u can change it for END but there is a safety check
    so it'll fail always when empty string isn't last char in ur dataset just chang it to silence then
    """
    sil_phones = set(phones_mapper.keys())
    def parse(self):
        # In multi-speaker mode each speaker has its own sub-directory of
        # TextGrids; otherwise all TextGrids live directly in txt_grid_path.
        speakers = (
            [
                i
                for i in os.listdir(self.txt_grid_path)
                if os.path.isdir(os.path.join(self.txt_grid_path, i))
            ]
            if self.multi_speaker
            else []
        )
        data = []
        if speakers:
            for speaker in speakers:
                file_list = os.listdir(os.path.join(self.txt_grid_path, speaker))
                self.parse_text_grid(file_list, data, speaker)
        else:
            file_list = os.listdir(self.txt_grid_path)
            self.parse_text_grid(file_list, data, "")
        # Write the accumulated "name|phonemes|speaker" lines in one pass.
        with open(os.path.join(self.dataset_path, self.training_file), "w") as f:
            f.writelines(data)
    def parse_text_grid(self, file_list: list, data: list, speaker_name: str):
        """Parse one speaker's TextGrids: save a ``*-durations.npy`` per file
        and append training lines to ``data`` (mutated in place)."""
        logging.info(
            f"\n Parse: {len(file_list)} files, speaker name: {speaker_name} \n"
        )
        for f_name in tqdm(file_list):
            text_grid = textgrid.TextGrid.fromFile(
                os.path.join(self.txt_grid_path, speaker_name, f_name)
            )
            # Tier index 1 — presumably the phones tier of the MFA output;
            # TODO(review): confirm tier ordering for your MFA version.
            pha = text_grid[1]
            durations = []
            phs = []
            for iterator, interval in enumerate(pha.intervals):
                mark = interval.mark
                if mark in self.sil_phones:
                    mark = self.phones_mapper[mark]
                    if mark == "END":
                        assert iterator == pha.intervals.__len__() - 1
                # check if empty ph is always last example in your dataset if not fix it
                # Duration in mel frames = seconds * (sample_rate / hop_size).
                dur = interval.duration() * (self.sample_rate / self.hop_size)
                durations.append(round(dur))
                phs.append(mark)
            full_ph = " ".join(phs)
            assert full_ph.split(" ").__len__() == durations.__len__()  # safety check
            base_name = f_name.split(".TextGrid")[0]
            np.save(
                os.path.join(self.output_durations_path, f"{base_name}-durations.npy"),
                np.array(durations).astype(np.int32),
                allow_pickle=False,
            )
            data.append(f"{speaker_name}/{base_name}|{full_ph}|{speaker_name}\n")
@click.command()
@click.option(
    "--yaml_path", default="examples/fastspeech2_libritts/conf/fastspeech2libritts.yaml"
)
@click.option("--dataset_path", default="dataset", type=str, help="Dataset directory")
@click.option("--text_grid_path", default="mfa/parsed", type=str)
@click.option("--output_durations_path", default="dataset/durations")
@click.option("--sample_rate", default=24000, type=int)
@click.option("--multi_speakers", default=1, type=int, help="Use multi-speaker version")
@click.option("--train_file", default="train.txt")
def main(
    yaml_path: str,
    dataset_path: str,
    text_grid_path: str,
    output_durations_path: str,
    sample_rate: int,
    multi_speakers: int,
    train_file: str,
):
    """CLI entry point: parse MFA TextGrids into durations + a training file.

    Reads ``hop_size`` from the preprocessing yaml, creates the durations
    output directory, then runs :class:`TxtGridParser` over ``text_grid_path``.
    """
    with open(yaml_path) as file:
        # yaml.load without an explicit Loader is deprecated (and a TypeError
        # on PyYAML >= 6); SafeLoader is sufficient for a plain config file.
        attrs = yaml.load(file, Loader=yaml.SafeLoader)
    hop_size = attrs["hop_size"]
    Path(output_durations_path).mkdir(parents=True, exist_ok=True)
    txt_grid_parser = TxtGridParser(
        sample_rate=sample_rate,
        multi_speaker=bool(multi_speakers),
        txt_grid_path=text_grid_path,
        hop_size=hop_size,
        output_durations_path=output_durations_path,
        training_file=train_file,
        dataset_path=dataset_path,
    )
    txt_grid_parser.parse()
if __name__ == "__main__":
    main()
| 5,040 | 31.947712 | 109 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2_libritts/fastspeech2_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset modules."""
import os
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
def average_by_duration(x, durs):
    """Collapse frame-level values to one averaged value per charactor.

    For charactor ``i`` the frame span is given by the cumulative sums of
    ``durs``; only non-zero entries of ``x`` inside the span contribute to
    the mean (zeros are treated as missing), and a span with no non-zero
    entries maps to 0.0. Returns a float32 array of length ``len(durs)``.
    """
    total_frames = durs.sum()
    # boundaries[i] .. boundaries[i + 1] is the frame span of charactor i.
    boundaries = np.cumsum(np.pad(durs, (1, 0)))
    char_values = np.zeros((durs.shape[0],), dtype=np.float32)
    spans = zip(range(total_frames), boundaries[:-1], boundaries[1:])
    for char_idx, begin, end in spans:
        segment = x[begin:end]
        voiced = segment[segment != 0.0]
        # np.mean of an empty array would be nan, hence the explicit guard.
        char_values[char_idx] = np.mean(voiced) if len(voiced) > 0 else 0.0
    return char_values.astype(np.float32)
def tf_average_by_duration(x, durs):
    """Graph-friendly wrapper: run ``average_by_duration`` as a numpy op
    inside a TensorFlow graph, returning a float32 tensor."""
    outs = tf.numpy_function(average_by_duration, [x, durs], tf.float32)
    return outs
class CharactorDurationF0EnergyMelDataset(AbstractDataset):
    """Tensorflow Charactor Duration F0 Energy Mel dataset.

    Matches charactor, duration, f0, energy, and mel files by sorted filename,
    normalizes f0/energy with precomputed stats, averages them per charactor,
    and exposes everything as a padded, batched tf.data pipeline.
    """
    def __init__(
        self,
        root_dir,
        charactor_query="*-ids.npy",
        mel_query="*-norm-feats.npy",
        duration_query="*-durations.npy",
        f0_query="*-raw-f0.npy",
        energy_query="*-raw-energy.npy",
        f0_stat="./dump/stats_f0.npy",
        energy_stat="./dump/stats_energy.npy",
        charactor_load_fn=np.load,
        mel_load_fn=np.load,
        duration_load_fn=np.load,
        f0_load_fn=np.load,
        energy_load_fn=np.load,
        mel_length_threshold=0,
        speakers_map=None
    ):
        """Initialize dataset.
        Args:
            root_dir (str): Root directory including dumped files.
            charactor_query (str): Query to find charactor files in root_dir.
            mel_query (str): Query to find feature files in root_dir.
            duration_query (str): Query to find duration files in root_dir.
            f0_query (str): Query to find f0 files in root_dir.
            energy_query (str): Query to find energy files in root_dir.
            f0_stat (str): str path of f0_stat.
            energy_stat (str): str path of energy_stat.
            charactor_load_fn (func): Function to load charactor file.
            mel_load_fn (func): Function to load feature file.
            duration_load_fn (func): Function to load duration file.
            f0_load_fn (func): Function to load f0 file.
            energy_load_fn (func): Function to load energy file.
            mel_length_threshold (int): Threshold to remove short feature files.
            speakers_map (dict): Speakers map generated in dataset preprocessing
        """
        # find all of charactor and mel files.
        charactor_files = sorted(find_files(root_dir, charactor_query))
        mel_files = sorted(find_files(root_dir, mel_query))
        duration_files = sorted(find_files(root_dir, duration_query))
        f0_files = sorted(find_files(root_dir, f0_query))
        energy_files = sorted(find_files(root_dir, energy_query))
        # assert the number of files
        assert len(mel_files) != 0, f"Not found any mels files in ${root_dir}."
        assert (
            len(mel_files)
            == len(charactor_files)
            == len(duration_files)
            == len(f0_files)
            == len(energy_files)
        ), f"Number of charactor, mel, duration, f0 and energy files are different"
        assert speakers_map != None, f"No speakers map found. Did you set --dataset_mapping?"
        # Utterance ids are file basenames with the query suffix stripped.
        if ".npy" in charactor_query:
            suffix = charactor_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in charactor_files]
        # NOTE(review): utt_ids is only bound for ".npy" queries; any other
        # query string would raise NameError below.
        # set global params
        self.utt_ids = utt_ids
        self.mel_files = mel_files
        self.charactor_files = charactor_files
        self.duration_files = duration_files
        self.f0_files = f0_files
        self.energy_files = energy_files
        self.mel_load_fn = mel_load_fn
        self.charactor_load_fn = charactor_load_fn
        self.duration_load_fn = duration_load_fn
        self.f0_load_fn = f0_load_fn
        self.energy_load_fn = energy_load_fn
        self.mel_length_threshold = mel_length_threshold
        self.speakers_map = speakers_map
        # Assumes utt_ids look like "<speaker>_<rest>" so the prefix before
        # the first "_" is a key of speakers_map — TODO(review): confirm.
        self.speakers = [self.speakers_map[i.split("_")[0]] for i in self.utt_ids]
        print("Speaker: utt_id", list(zip(self.speakers, self.utt_ids)))
        # Normalization statistics; presumably [mean, std] pairs — confirm
        # against the preprocessing that produced them.
        self.f0_stat = np.load(f0_stat)
        self.energy_stat = np.load(energy_stat)
    def get_args(self):
        # Arguments forwarded to generator() by tf.data.Dataset.from_generator.
        return [self.utt_ids]
    def _norm_mean_std(self, x, mean, std):
        # Standardize x, but keep entries that were exactly 0.0 at 0.0
        # (zeros presumably mark unvoiced/absent frames — confirm upstream).
        zero_idxs = np.where(x == 0.0)[0]
        x = (x - mean) / std
        x[zero_idxs] = 0.0
        return x
    def _norm_mean_std_tf(self, x, mean, std):
        # Graph-side wrapper around _norm_mean_std.
        x = tf.numpy_function(self._norm_mean_std, [x, mean, std], tf.float32)
        return x
    def generator(self, utt_ids):
        # Yields only file paths plus the speaker id; arrays are loaded
        # lazily in _load_data inside the tf.data pipeline.
        for i, utt_id in enumerate(utt_ids):
            mel_file = self.mel_files[i]
            charactor_file = self.charactor_files[i]
            duration_file = self.duration_files[i]
            f0_file = self.f0_files[i]
            energy_file = self.energy_files[i]
            speaker_id = self.speakers[i]
            items = {
                "utt_ids": utt_id,
                "mel_files": mel_file,
                "charactor_files": charactor_file,
                "duration_files": duration_file,
                "f0_files": f0_file,
                "energy_files": energy_file,
                "speaker_ids": speaker_id,
            }
            yield items
    @tf.function
    def _load_data(self, items):
        # Load arrays inside the graph. NOTE(review): uses np.load directly,
        # ignoring the *_load_fn hooks stored in __init__.
        mel = tf.numpy_function(np.load, [items["mel_files"]], tf.float32)
        charactor = tf.numpy_function(np.load, [items["charactor_files"]], tf.int32)
        duration = tf.numpy_function(np.load, [items["duration_files"]], tf.int32)
        f0 = tf.numpy_function(np.load, [items["f0_files"]], tf.float32)
        energy = tf.numpy_function(np.load, [items["energy_files"]], tf.float32)
        # Standardize f0/energy with the precomputed dataset statistics.
        f0 = self._norm_mean_std_tf(f0, self.f0_stat[0], self.f0_stat[1])
        energy = self._norm_mean_std_tf(
            energy, self.energy_stat[0], self.energy_stat[1]
        )
        # calculate charactor f0/energy
        f0 = tf_average_by_duration(f0, duration)
        energy = tf_average_by_duration(energy, duration)
        items = {
            "utt_ids": items["utt_ids"],
            "input_ids": charactor,
            "speaker_ids": items["speaker_ids"],
            "duration_gts": duration,
            "f0_gts": f0,
            "energy_gts": energy,
            "mel_gts": mel,
            "mel_lengths": len(mel),
        }
        return items
    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function."""
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )
        # load data
        datasets = datasets.map(
            lambda items: self._load_data(items), tf.data.experimental.AUTOTUNE
        )
        # Drop utterances whose mel is not longer than the configured threshold.
        datasets = datasets.filter(
            lambda x: x["mel_lengths"] > self.mel_length_threshold
        )
        if allow_cache:
            datasets = datasets.cache()
        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )
        # define padded shapes
        padded_shapes = {
            "utt_ids": [],
            "input_ids": [None],
            "speaker_ids": [],
            "duration_gts": [None],
            "f0_gts": [None],
            "energy_gts": [None],
            "mel_gts": [None, None],
            "mel_lengths": [],
        }
        # drop_remainder=True keeps every batch at exactly batch_size.
        datasets = datasets.padded_batch(
            batch_size, padded_shapes=padded_shapes, drop_remainder=True
        )
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets
    def get_output_dtypes(self):
        # Dtypes of the items yielded by generator().
        output_types = {
            "utt_ids": tf.string,
            "mel_files": tf.string,
            "charactor_files": tf.string,
            "duration_files": tf.string,
            "f0_files": tf.string,
            "energy_files": tf.string,
            "speaker_ids": tf.int32,
        }
        return output_types
    def get_len_dataset(self):
        # Number of utterances found on disk.
        return len(self.utt_ids)
    def __name__(self):
        # NOTE(review): method shadows the usual __name__ string attribute.
        return "CharactorDurationF0EnergyMelDataset"
| 9,146 | 34.59144 | 93 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2_libritts/train_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech2."""
import tensorflow as tf
# Enable memory growth on every visible GPU so TensorFlow allocates VRAM on
# demand instead of reserving it all at import time.
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
    tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
# Make repo-root packages (examples.*, tensorflow_tts.*) importable when the
# script is launched from the repository root.
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
import json
import tensorflow_tts
from examples.fastspeech2_libritts.fastspeech2_dataset import (
CharactorDurationF0EnergyMelDataset,
)
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import (
calculate_2d_loss,
calculate_3d_loss,
return_strategy,
TFGriffinLim,
)
class FastSpeech2Trainer(Seq2SeqBasedTrainer):
    """FastSpeech2 Trainer class based on FastSpeechTrainer."""
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_mixed_precision=False,
        stats_path: str = "",
        dataset_config: str = "",
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            is_mixed_precision (bool): Use mixed precision or not.
            stats_path (str): Path to mel stats, used only for Griffin-Lim previews.
            dataset_config (str): Path to the preprocessing yaml, ditto.
        """
        super(FastSpeech2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "duration_loss",
            "f0_loss",
            "energy_loss",
            "mel_loss_before",
            "mel_loss_after",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
        # Optionally synthesize Griffin-Lim audio previews at eval time.
        self.use_griffin = config.get("use_griffin", False)
        self.griffin_lim_tf = None
        if self.use_griffin:
            logging.info(
                f"Load griff stats from {stats_path} and config from {dataset_config}"
            )
            self.griff_conf = yaml.load(open(dataset_config), Loader=yaml.Loader)
            self.prepare_grim(stats_path, self.griff_conf)
    def prepare_grim(self, stats_path, config):
        # Build the TF Griffin-Lim vocoder; requires mel statistics on disk.
        if not stats_path:
            raise KeyError("stats path need to exist")
        self.griffin_lim_tf = TFGriffinLim(stats_path, config)
    def compile(self, model, optimizer):
        # Reduction.NONE keeps per-timestep losses so the calculate_*_loss
        # helpers can reduce to one loss value per example.
        super().compile(model, optimizer)
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs = outputs
        # Durations are regressed in log domain; +1 avoids log(0) for
        # zero-length (padded) tokens.
        log_duration = tf.math.log(
            tf.cast(tf.math.add(batch["duration_gts"], 1), tf.float32)
        )
        duration_loss = calculate_2d_loss(log_duration, duration_outputs, self.mse)
        f0_loss = calculate_2d_loss(batch["f0_gts"], f0_outputs, self.mse)
        energy_loss = calculate_2d_loss(batch["energy_gts"], energy_outputs, self.mse)
        mel_loss_before = calculate_3d_loss(batch["mel_gts"], mel_before, self.mae)
        mel_loss_after = calculate_3d_loss(batch["mel_gts"], mel_after, self.mae)
        # Total loss is the unweighted sum of all five terms.
        per_example_losses = (
            duration_loss + f0_loss + energy_loss + mel_loss_before + mel_loss_after
        )
        dict_metrics_losses = {
            "duration_loss": duration_loss,
            "f0_loss": f0_loss,
            "energy_loss": energy_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
        }
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        import matplotlib.pyplot as plt
        # predict with tf.function.
        outputs = self.one_step_predict(batch)
        mels_before, mels_after, *_ = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        # (PerReplica values exist only under a multi-device strategy; the
        # except branch handles the single-device case.)
        try:
            mels_before = mels_before.values[0].numpy()
            mels_after = mels_after.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            mels_before = mels_before.numpy()
            mels_after = mels_after.numpy()
            mel_gts = mel_gts.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        if self.use_griffin:
            griff_dir_name = os.path.join(
                self.config["outdir"], f"predictions/{self.steps}_wav"
            )
            if not os.path.exists(griff_dir_name):
                os.makedirs(griff_dir_name)
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (mel_gt, mel_before, mel_after) in enumerate(
            zip(mel_gts, mels_before, mels_after), 0
        ):
            if self.use_griffin:
                # Vocode before/after/ground-truth mels to wav previews.
                # (The reshape hard-codes 80 mel bins.)
                utt_id = utt_ids[idx]
                grif_before = self.griffin_lim_tf(
                    tf.reshape(mel_before, [-1, 80])[tf.newaxis, :], n_iter=32
                )
                grif_after = self.griffin_lim_tf(
                    tf.reshape(mel_after, [-1, 80])[tf.newaxis, :], n_iter=32
                )
                grif_gt = self.griffin_lim_tf(
                    tf.reshape(mel_gt, [-1, 80])[tf.newaxis, :], n_iter=32
                )
                self.griffin_lim_tf.save_wav(
                    grif_before, griff_dir_name, f"{utt_id}_before"
                )
                self.griffin_lim_tf.save_wav(
                    grif_after, griff_dir_name, f"{utt_id}_after"
                )
                self.griffin_lim_tf.save_wav(grif_gt, griff_dir_name, f"{utt_id}_gt")
            utt_id = utt_ids[idx]
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]
            # plot figure with target / before / after mels and save it
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run FastSpeech2 training process.

    Parses command-line arguments, loads and re-saves the YAML config,
    builds the train/valid datasets, constructs the model and optimizer
    under the distribution strategy scope, and trains to completion
    (Ctrl-C saves a checkpoint before exiting).
    """
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default="dump/train",
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default="dump/valid",
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm-mels for train or raw."
    )
    parser.add_argument(
        "--f0-stat", default="./dump/stats_f0.npy", type=str, help="f0-stat path.",
    )
    parser.add_argument(
        "--energy-stat",
        default="./dump/stats_energy.npy",
        type=str,
        help="energy-stat path.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=1,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--dataset_config", default="preprocess/libritts_preprocess.yaml", type=str,
    )
    parser.add_argument(
        "--dataset_stats", default="dump/stats.npy", type=str,
    )
    parser.add_argument(
        "--dataset_mapping", default="dump/libritts_mapper.npy", type=str,
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config (must happen before any graph is built)
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: the actual flag is --dev-dir (message previously said --valid-dir)
        raise ValueError("Please specify --dev-dir")

    # load and save config so the run is reproducible from outdir alone
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        duration_query = "*-durations.npy"
        f0_query = "*-raw-f0.npy"
        energy_query = "*-raw-energy.npy"
    else:
        raise ValueError("Only npy are supported.")

    # load speakers map from dataset map
    with open(args.dataset_mapping) as f:
        dataset_mapping = json.load(f)
        speakers_map = dataset_mapping["speakers_map"]

    # Check n_speakers matches number of speakers in speakers_map
    n_speakers = config["fastspeech2_params"]["n_speakers"]
    assert n_speakers == len(
        speakers_map
    ), "Number of speakers in dataset does not match n_speakers in config"

    # define train/valid dataset
    train_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
        speakers_map=speakers_map,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
        speakers_map=speakers_map,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = FastSpeech2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
        stats_path=args.dataset_stats,
        dataset_config=args.dataset_config,
    )

    with STRATEGY.scope():
        # define model
        fastspeech = TFFastSpeech2(
            config=FastSpeech2Config(**config["fastspeech2_params"])
        )
        fastspeech._build()
        fastspeech.summary()

        if len(args.pretrained) > 1:
            fastspeech.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        # AdamW for fastspeech: polynomial decay wrapped in warmup schedule
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )
        # touch iterations so the optimizer variable is created inside the scope
        _ = optimizer.iterations

    # compile trainer
    trainer.compile(model=fastspeech, optimizer=optimizer)

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Script entry point.
if __name__ == "__main__":
    main()
| 17,059 | 33.816327 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/melgan/train_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train MelGAN."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
# Enable memory growth on every visible GPU so TF allocates memory lazily
# instead of reserving all device memory up front.
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
import tensorflow_tts
import tensorflow_tts.configs.melgan as MELGAN_CONFIG
from examples.melgan.audio_mel_dataset import AudioMelDataset
from tensorflow_tts.losses import TFMelSpectrogram
from tensorflow_tts.models import TFMelGANGenerator, TFMelGANMultiScaleDiscriminator
from tensorflow_tts.trainers import GanBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MelganTrainer(GanBasedTrainer):
    """Melgan Trainer class based on GanBasedTrainer."""

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy used to run training.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MelganTrainer, self).__init__(
            steps,
            epochs,
            config,
            strategy,
            is_generator_mixed_precision,
            is_discriminator_mixed_precision,
        )
        # metric names aggregated during train/eval and logged via tf.summary
        self.list_metrics_name = [
            "adversarial_loss",
            "fm_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
            "mels_spectrogram_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
        self.config = config

    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Attach models/optimizers and define the losses used by this trainer."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # Reduction.NONE keeps per-example values so the trainer can return
        # per-example losses (see compute_per_example_* below).
        self.mse_loss = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae_loss = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mels_loss = TFMelSpectrogram()

    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        audios = batch["audios"]
        y_hat = outputs
        # discriminator returns, per scale, a list of layer outputs;
        # the last element of each list is the final score map.
        p_hat = self._discriminator(y_hat)
        p = self._discriminator(tf.expand_dims(audios, 2))
        # adversarial loss: push fake scores toward 1 at every scale
        adv_loss = 0.0
        for i in range(len(p_hat)):
            adv_loss += calculate_3d_loss(
                tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
            )
        adv_loss /= i + 1  # average over scales (relies on loop var after the loop)
        # define feature-matching loss over all intermediate layers
        fm_loss = 0.0
        for i in range(len(p_hat)):
            for j in range(len(p_hat[i]) - 1):
                fm_loss += calculate_3d_loss(
                    p[i][j], p_hat[i][j], loss_fn=self.mae_loss
                )
        fm_loss /= (i + 1) * (j + 1)  # average over scales x layers
        adv_loss += self.config["lambda_feat_match"] * fm_loss
        per_example_losses = adv_loss
        dict_metrics_losses = {
            "adversarial_loss": adv_loss,
            "fm_loss": fm_loss,
            "gen_loss": adv_loss,
            "mels_spectrogram_loss": calculate_2d_loss(
                audios, tf.squeeze(y_hat, -1), loss_fn=self.mels_loss
            ),
        }
        return per_example_losses, dict_metrics_losses

    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per example discriminator losses (MSE toward 1 for real,
        0 for fake, averaged over discriminator scales)."""
        audios = batch["audios"]
        y_hat = gen_outputs
        y = tf.expand_dims(audios, 2)
        p = self._discriminator(y)
        p_hat = self._discriminator(y_hat)
        real_loss = 0.0
        fake_loss = 0.0
        for i in range(len(p)):
            real_loss += calculate_3d_loss(
                tf.ones_like(p[i][-1]), p[i][-1], loss_fn=self.mse_loss
            )
            fake_loss += calculate_3d_loss(
                tf.zeros_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
            )
        real_loss /= i + 1  # average over scales
        fake_loss /= i + 1
        dis_loss = real_loss + fake_loss
        # calculate per_example_losses and dict_metrics_losses
        per_example_losses = dis_loss
        dict_metrics_losses = {
            "real_loss": real_loss,
            "fake_loss": fake_loss,
            "dis_loss": dis_loss,
        }
        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result (waveform plots + wav files)."""
        import matplotlib.pyplot as plt

        # generate
        y_batch_ = self.one_step_predict(batch)
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        try:
            y_batch_ = y_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            # not a per-replica value (single-device run): plain tensors
            y_batch_ = y_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()
            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            # save as wavefile (clipped to valid PCM range)
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def collater(
    items,
    batch_max_steps=tf.constant(8192, dtype=tf.int32),
    hop_size=tf.constant(256, dtype=tf.int32),
):
    """Initialize collater (mapping function) for Tensorflow Audio-Mel Dataset.

    Pads or randomly crops each audio/mel pair so the audio is exactly
    ``batch_max_steps`` samples long with the matching number of mel frames.
    NOTE(review): this runs inside tf.data.Dataset.map, so the Python `if`s
    over tensor values are converted by AutoGraph — the control-flow shape
    here is semantically significant.

    Args:
        batch_max_steps (int): The maximum length of input signal in batch.
        hop_size (int): Hop size of auxiliary features.
    """
    audio, mel = items["audios"], items["mels"]
    if batch_max_steps is None:
        # use the longest whole-hop length of this utterance
        batch_max_steps = (tf.shape(audio)[0] // hop_size) * hop_size
    batch_max_frames = batch_max_steps // hop_size
    if len(audio) < len(mel) * hop_size:
        # pad audio so it covers every mel frame
        audio = tf.pad(audio, [[0, len(mel) * hop_size - len(audio)]])
    if len(mel) > batch_max_frames:
        # randomly pickup with the batch_max_steps length of the part
        interval_start = 0
        interval_end = len(mel) - batch_max_frames
        start_frame = tf.random.uniform(
            shape=[], minval=interval_start, maxval=interval_end, dtype=tf.int32
        )
        start_step = start_frame * hop_size
        audio = audio[start_step : start_step + batch_max_steps]
        mel = mel[start_frame : start_frame + batch_max_frames, :]
    else:
        # too short: zero-pad both streams up to the fixed length
        audio = tf.pad(audio, [[0, batch_max_steps - len(audio)]])
        mel = tf.pad(mel, [[0, batch_max_frames - len(mel)], [0, 0]])
    items = {
        "utt_ids": items["utt_ids"],
        "audios": audio,
        "mels": mel,
        "mel_lengths": len(mel),
        "audio_lengths": len(audio),
    }
    return items
def main():
    """Run MelGAN training process.

    Parses command-line arguments, loads and re-saves the YAML config,
    builds train/valid audio-mel datasets, constructs generator and
    discriminator under the distribution strategy scope, and trains
    (Ctrl-C saves a checkpoint before exiting).
    """
    parser = argparse.ArgumentParser(
        description="Train MelGAN (See detail in tensorflow_tts/bin/train-melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config (must happen before any graph is built)
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: the actual flag is --dev-dir (message previously said --valid-dir)
        raise ValueError("Please specify --dev-dir")

    # load and save config so the run is reproducible from outdir alone
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        # shortest usable sample: one training window plus the generator's
        # auxiliary context on both sides
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-norm-feats.npy" if args.use_norm else "*-raw-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = MelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    # define generator and discriminator
    with STRATEGY.scope():
        generator = TFMelGANGenerator(
            MELGAN_CONFIG.MelGANGeneratorConfig(**config["melgan_generator_params"]),
            name="melgan_generator",
        )
        discriminator = TFMelGANMultiScaleDiscriminator(
            MELGAN_CONFIG.MelGANDiscriminatorConfig(
                **config["melgan_discriminator_params"]
            ),
            name="melgan_discriminator",
        )

        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)

        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )

        generator.summary()
        discriminator.summary()

        gen_optimizer = tf.keras.optimizers.Adam(**config["generator_optimizer_params"])
        dis_optimizer = tf.keras.optimizers.Adam(
            **config["discriminator_optimizer_params"]
        )

    # compile trainer
    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Script entry point.
if __name__ == "__main__":
    main()
| 16,989 | 31.48566 | 98 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/melgan/audio_mel_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset modules."""
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
class AudioMelDataset(AbstractDataset):
    """Tensorflow Audio Mel dataset.

    Pairs dumped waveform files with their mel-spectrogram feature files
    (matched by sorted filename) and exposes them as a tf.data pipeline.
    """

    def __init__(
        self,
        root_dir,
        audio_query="*-wave.npy",
        mel_query="*-raw-feats.npy",
        audio_load_fn=np.load,
        mel_load_fn=np.load,
        audio_length_threshold=0,
        mel_length_threshold=0,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            audio_query (str): Query to find audio files in root_dir.
            mel_query (str): Query to find feature files in root_dir.
            audio_load_fn (func): Function to load audio file.
            mel_load_fn (func): Function to load feature file.
            audio_length_threshold (int): Threshold to remove short audio files.
            mel_length_threshold (int): Threshold to remove short feature files.
        """
        # find all of audio and mel files.
        audio_files = sorted(find_files(root_dir, audio_query))
        mel_files = sorted(find_files(root_dir, mel_query))

        # assert the number of files
        assert len(audio_files) != 0, f"Not found any audio files in {root_dir}."
        assert len(audio_files) == len(
            mel_files
        ), f"Number of audio and mel files are different ({len(audio_files)} vs {len(mel_files)})."

        if ".npy" in audio_query:
            suffix = audio_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in audio_files]

        # set global params
        self.utt_ids = utt_ids
        self.audio_files = audio_files
        self.mel_files = mel_files
        self.audio_load_fn = audio_load_fn
        self.mel_load_fn = mel_load_fn
        # coerce None (passed by callers to disable filtering) to 0 so the
        # tensor comparisons in create() stay valid.
        self.audio_length_threshold = audio_length_threshold or 0
        self.mel_length_threshold = mel_length_threshold or 0

    def get_args(self):
        """Return arguments for generator (the utterance id list)."""
        return [self.utt_ids]

    def generator(self, utt_ids):
        """Yield one {utt_ids, audio_files, mel_files} dict per utterance."""
        for i, utt_id in enumerate(utt_ids):
            audio_file = self.audio_files[i]
            mel_file = self.mel_files[i]
            items = {
                "utt_ids": utt_id,
                "audio_files": audio_file,
                "mel_files": mel_file,
            }
            yield items

    @tf.function
    def _load_data(self, items):
        """Load audio/mel arrays from the file paths of one generator item."""
        # use the configured load functions (previously hard-coded np.load,
        # which silently ignored audio_load_fn/mel_load_fn).
        audio = tf.numpy_function(self.audio_load_fn, [items["audio_files"]], tf.float32)
        mel = tf.numpy_function(self.mel_load_fn, [items["mel_files"]], tf.float32)
        items = {
            "utt_ids": items["utt_ids"],
            "audios": audio,
            "mels": mel,
            "mel_lengths": len(mel),
            "audio_lengths": len(audio),
        }
        return items

    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function."""
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )

        # turn off auto-sharding; distribution is handled by the caller's strategy
        options = tf.data.Options()
        options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        datasets = datasets.with_options(options)

        # load dataset
        datasets = datasets.map(
            lambda items: self._load_data(items), tf.data.experimental.AUTOTUNE
        )

        # drop utterances shorter than the configured thresholds
        datasets = datasets.filter(
            lambda x: x["mel_lengths"] > self.mel_length_threshold
        )
        datasets = datasets.filter(
            lambda x: x["audio_lengths"] > self.audio_length_threshold
        )

        if allow_cache:
            datasets = datasets.cache()

        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )

        if batch_size > 1 and map_fn is None:
            raise ValueError("map function must define when batch_size > 1.")

        if map_fn is not None:
            datasets = datasets.map(map_fn, tf.data.experimental.AUTOTUNE)

        # define padded shapes
        padded_shapes = {
            "utt_ids": [],
            "audios": [None],
            "mels": [None, 80],
            "mel_lengths": [],
            "audio_lengths": [],
        }

        # define padded values
        padding_values = {
            "utt_ids": "",
            "audios": 0.0,
            "mels": 0.0,
            "mel_lengths": 0,
            "audio_lengths": 0,
        }

        datasets = datasets.padded_batch(
            batch_size,
            padded_shapes=padded_shapes,
            padding_values=padding_values,
            drop_remainder=True,
        )
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets

    def get_output_dtypes(self):
        """Return output dtypes of generator items for from_generator."""
        output_types = {
            "utt_ids": tf.string,
            "audio_files": tf.string,
            "mel_files": tf.string,
        }
        return output_types

    def get_len_dataset(self):
        """Return number of utterances in the dataset."""
        return len(self.utt_ids)

    def __name__(self):
        # NOTE(review): defining __name__ as a method is unusual but kept
        # for backward compatibility with existing callers.
        return "AudioMelDataset"
| 6,057 | 31.223404 | 100 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/melgan/decode_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained Melgan from folder."""
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
from tensorflow_tts.configs import MelGANGeneratorConfig
from tensorflow_tts.datasets import MelDataset
from tensorflow_tts.models import TFMelGANGenerator
def main():
    """Run melgan decoding from folder."""
    parser = argparse.ArgumentParser(
        description="Generate Audio from melspectrogram with trained melgan "
        "(See detail in example/melgan/decode_melgan.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", type=int, default=1, help="Use norm or raw melspectrogram."
    )
    parser.add_argument("--batch-size", type=int, default=8, help="batch_size.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # configure logging verbosity (shared format, level from --verbose)
    log_fmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    if args.verbose > 1:
        logging.basicConfig(level=logging.DEBUG, format=log_fmt)
    elif args.verbose > 0:
        logging.basicConfig(level=logging.INFO, format=log_fmt)
    else:
        logging.basicConfig(level=logging.WARN, format=log_fmt)
        logging.warning("Skip DEBUG/INFO messages")

    # make sure the output folder exists
    os.makedirs(args.outdir, exist_ok=True)

    # load config and merge CLI arguments into it
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] != "npy":
        raise ValueError("Only npy is supported.")
    if args.use_norm == 1:
        mel_query = "*-norm-feats.npy"
    else:
        mel_query = "*-raw-feats.npy"
    mel_load_fn = np.load

    # build the mel-spectrogram data loader
    dataset = MelDataset(
        root_dir=args.rootdir,
        mel_query=mel_query,
        mel_load_fn=mel_load_fn,
    ).create(batch_size=args.batch_size)

    # restore the generator from checkpoint
    melgan = TFMelGANGenerator(
        config=MelGANGeneratorConfig(**config["melgan_generator_params"]),
        name="melgan_generator",
    )
    melgan._build()
    melgan.load_weights(args.checkpoint)

    hop_size = config["hop_size"]
    for data in tqdm(dataset, desc="[Decoding]"):
        # melgan inference, converted to numpy [B, T]
        generated = melgan(data["mels"]).numpy()
        # write one wav per utterance, trimmed to its true length
        for utt_id_t, audio, mel_length in zip(
            data["utt_ids"], generated, data["mel_lengths"]
        ):
            utt_id = utt_id_t.numpy().decode("utf-8")
            sf.write(
                os.path.join(args.outdir, f"{utt_id}.wav"),
                audio[: mel_length.numpy() * hop_size],
                config["sampling_rate"],
                "PCM_16",
            )
# Script entry point.
if __name__ == "__main__":
    main()
| 4,458 | 30.401408 | 98 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/melgan_stft/train_melgan_stft.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train MelGAN Multi Resolution STFT Loss."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
# Enable memory growth on every visible GPU so TF allocates memory lazily
# instead of reserving all device memory up front.
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
import tensorflow_tts
import tensorflow_tts.configs.melgan as MELGAN_CONFIG
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import MelganTrainer, collater
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.models import TFMelGANGenerator, TFMelGANMultiScaleDiscriminator
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MultiSTFTMelganTrainer(MelganTrainer):
    """Multi STFT Melgan Trainer class based on MelganTrainer.

    Replaces the parent's mel-spectrogram metric with a multi-resolution
    STFT reconstruction loss, and only adds the adversarial / feature
    matching terms once discriminator training has started.
    """

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy used to run training.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MultiSTFTMelganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )
        # metric names tracked for train/eval summaries (overrides parent list)
        self.list_metrics_name = [
            "adversarial_loss",
            "fm_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
            "spectral_convergence_loss",
            "log_magnitude_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Attach models/optimizers and define the multi-resolution STFT loss."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.stft_loss = TFMultiResolutionSTFT(**self.config["stft_loss_params"])

    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0
        audios = batch["audios"]
        y_hat = outputs
        # calculate multi-resolution stft loss
        sc_loss, mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.stft_loss
        )
        # trick to prevent exploding loss: zero out outlier examples (>= 15.0)
        sc_loss = tf.where(sc_loss >= 15.0, 0.0, sc_loss)
        mag_loss = tf.where(mag_loss >= 15.0, 0.0, mag_loss)
        # compute generator loss
        gen_loss = 0.5 * (sc_loss + mag_loss)
        # adversarial + feature-matching terms are added only after the
        # discriminator has started training.
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            p = self._discriminator(tf.expand_dims(audios, 2))
            adv_loss = 0.0
            for i in range(len(p_hat)):
                adv_loss += calculate_3d_loss(
                    tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
                )
            adv_loss /= i + 1  # average over discriminator scales
            # define feature-matching loss
            fm_loss = 0.0
            for i in range(len(p_hat)):
                for j in range(len(p_hat[i]) - 1):
                    fm_loss += calculate_3d_loss(
                        p[i][j], p_hat[i][j], loss_fn=self.mae_loss
                    )
            fm_loss /= (i + 1) * (j + 1)  # average over scales x layers
            adv_loss += self.config["lambda_feat_match"] * fm_loss
            gen_loss += self.config["lambda_adv"] * adv_loss
            dict_metrics_losses.update({"adversarial_loss": adv_loss})
            dict_metrics_losses.update({"fm_loss": fm_loss})
        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"spectral_convergence_loss": sc_loss})
        dict_metrics_losses.update({"log_magnitude_loss": mag_loss})
        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses
def main():
    """Run the MelGAN (multi-STFT-loss) training process.

    Parses CLI arguments, sets up logging and the distribution strategy,
    builds the train/valid datasets, constructs the generator/discriminator
    and their optimizers under the strategy scope, then trains via
    MultiSTFTMelganTrainer.fit.

    Raises:
        ValueError: if --train-dir/--dev-dir are missing or the dump format
            is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train MelGAN (See detail in tensorflow_tts/bin/train-melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # BUGFIX: the message previously referenced a nonexistent
        # "--valid-dir" flag; the actual flag is --dev-dir.
        raise ValueError("Please specify --dev-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # Global batch: per-replica batch * replicas * accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = MultiSTFTMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )
    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFMelGANGenerator(
            MELGAN_CONFIG.MelGANGeneratorConfig(**config["melgan_generator_params"]),
            name="melgan_generator",
        )
        discriminator = TFMelGANMultiScaleDiscriminator(
            MELGAN_CONFIG.MelGANDiscriminatorConfig(
                **config["melgan_discriminator_params"]
            ),
            name="melgan_discriminator",
        )
        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)
        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        generator.summary()
        discriminator.summary()
        # define optimizer
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])
        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn, amsgrad=False
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn, amsgrad=False
        )
    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )
    # start training; a KeyboardInterrupt still saves a resumable checkpoint.
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Script entry point.
if __name__ == "__main__":
    main()
| 13,562 | 32.655087 | 98 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/cpptflite/demo/text2ids.py | import sys
import re
eng_pat = re.compile("[a-zA-Z]+")
# CLI: text2ids.py <mapper.json> <text> — prints space-separated token ids.
if __name__ == "__main__":
    args = sys.argv
    if len(args) == 3:
        from tensorflow_tts.inference import AutoProcessor
        mapper_path = args[1]
        text = args[2]
        processor = AutoProcessor.from_pretrained(pretrained_path=mapper_path)
        # English-looking input uses the default conversion path; anything
        # else goes through the inference-mode conversion.
        if eng_pat.match(text) is not None:
            ids = processor.text_to_sequence(text)
        else:
            ids = processor.text_to_sequence(text, inference=True)
        print(" ".join(str(token) for token in ids))
    else:
        print("usage: python3 {} mapper.json text".format(args[0]))
TensorFlowTTS | TensorFlowTTS-master/examples/multiband_melgan/train_multiband_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Multi-Band MelGAN."""
import tensorflow as tf
physical_devices = tf.config.list_physical_devices("GPU")
for i in range(len(physical_devices)):
tf.config.experimental.set_memory_growth(physical_devices[i], True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tensorflow.keras.mixed_precision import experimental as mixed_precision
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import MelganTrainer, collater
from tensorflow_tts.configs import (
MultiBandMelGANDiscriminatorConfig,
MultiBandMelGANGeneratorConfig,
)
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.models import (
TFPQMF,
TFMelGANGenerator,
TFMelGANMultiScaleDiscriminator,
)
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class MultiBandMelganTrainer(MelganTrainer):
    """Multi-Band MelGAN Trainer class based on MelganTrainer."""
    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the training runs under.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(MultiBandMelganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )
        # define metrics to aggregates data and use tf.summary logs them;
        # keys returned by the loss methods must be a subset of this list.
        self.list_metrics_name = [
            "adversarial_loss",
            "subband_spectral_convergence_loss",
            "subband_log_magnitude_loss",
            "fullband_spectral_convergence_loss",
            "fullband_log_magnitude_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer, pqmf):
        """Attach models/optimizers, the STFT losses and the PQMF filter bank."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.sub_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["subband_stft_loss_params"]
        )
        self.full_band_stft_loss = TFMultiResolutionSTFT(
            **self.config["stft_loss_params"]
        )
        # define pqmf module
        self.pqmf = pqmf
    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0
        audios = batch["audios"]
        y_mb_hat = outputs
        # Full-band waveform reconstructed from predicted sub-bands; real
        # audio analyzed into sub-bands for the sub-band STFT loss.
        y_hat = self.pqmf.synthesis(y_mb_hat)
        y_mb = self.pqmf.analysis(tf.expand_dims(audios, -1))
        y_mb = tf.transpose(y_mb, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb = tf.reshape(y_mb, (-1, tf.shape(y_mb)[-1]))  # [B * subbands, T']
        y_mb_hat = tf.transpose(y_mb_hat, (0, 2, 1))  # [B, subbands, T//subbands]
        y_mb_hat = tf.reshape(
            y_mb_hat, (-1, tf.shape(y_mb_hat)[-1])
        )  # [B * subbands, T']
        # calculate sub/full band spectral_convergence and log mag loss.
        sub_sc_loss, sub_mag_loss = calculate_2d_loss(
            y_mb, y_mb_hat, self.sub_band_stft_loss
        )
        # Fold the [B * subbands] losses back to per-example means, shape [B].
        sub_sc_loss = tf.reduce_mean(
            tf.reshape(sub_sc_loss, [-1, self.pqmf.subbands]), -1
        )
        sub_mag_loss = tf.reduce_mean(
            tf.reshape(sub_mag_loss, [-1, self.pqmf.subbands]), -1
        )
        full_sc_loss, full_mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.full_band_stft_loss
        )
        # define generator loss
        gen_loss = 0.5 * (sub_sc_loss + sub_mag_loss) + 0.5 * (
            full_sc_loss + full_mag_loss
        )
        # Adversarial term only applies after the discriminator warm-up;
        # before that step its metric key is absent from dict_metrics_losses.
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            p = self._discriminator(tf.expand_dims(audios, 2))
            adv_loss = 0.0
            for i in range(len(p_hat)):
                adv_loss += calculate_3d_loss(
                    tf.ones_like(p_hat[i][-1]), p_hat[i][-1], loss_fn=self.mse_loss
                )
            # NOTE: deliberately reads the loop variable after the loop to
            # average over the number of discriminator scales.
            adv_loss /= i + 1
            gen_loss += self.config["lambda_adv"] * adv_loss
            dict_metrics_losses.update({"adversarial_loss": adv_loss},)
        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"subband_spectral_convergence_loss": sub_sc_loss})
        dict_metrics_losses.update({"subband_log_magnitude_loss": sub_mag_loss})
        dict_metrics_losses.update({"fullband_spectral_convergence_loss": full_sc_loss})
        dict_metrics_losses.update({"fullband_log_magnitude_loss": full_mag_loss})
        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses
    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per example discriminator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        # Synthesize the full-band waveform first, then reuse the parent
        # (single-band) discriminator loss computation.
        y_mb_hat = gen_outputs
        y_hat = self.pqmf.synthesis(y_mb_hat)
        (
            per_example_losses,
            dict_metrics_losses,
        ) = super().compute_per_example_discriminator_losses(batch, y_hat)
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result (waveform plots + wav files)."""
        import matplotlib.pyplot as plt
        y_mb_batch_ = self.one_step_predict(batch)  # [B, T // subbands, subbands]
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica; the fallback handles
        # the non-distributed case where values are plain tensors.
        try:
            y_mb_batch_ = y_mb_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            y_mb_batch_ = y_mb_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()
        y_batch_ = self.pqmf.synthesis(y_mb_batch_).numpy()  # [B, T, 1]
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()
            # plot ground-truth vs generated waveform and save the figure
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
            # save as wavefile (clipped to the valid PCM range)
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def main():
    """Run the Multi-Band MelGAN training process.

    Parses CLI arguments, sets up logging and the distribution strategy,
    builds the train/valid datasets, constructs the generator, discriminator
    and PQMF filter bank under the strategy scope, then trains via
    MultiBandMelganTrainer.fit.

    Raises:
        ValueError: if --train-dir/--dev-dir are missing or the dump format
            is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Train MultiBand MelGAN (See detail in examples/multiband_melgan/train_multiband_melgan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 mb-melgan generator to load weights from",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # BUGFIX: the message previously referenced a nonexistent
        # "--valid-dir" flag; the actual flag is --dev-dir.
        raise ValueError("Please specify --dev-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["multiband_melgan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # Global batch: per-replica batch * replicas * accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = MultiBandMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )
    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFMelGANGenerator(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            name="multi_band_melgan_generator",
        )
        discriminator = TFMelGANMultiScaleDiscriminator(
            MultiBandMelGANDiscriminatorConfig(
                **config["multiband_melgan_discriminator_params"]
            ),
            name="multi_band_melgan_discriminator",
        )
        pqmf = TFPQMF(
            MultiBandMelGANGeneratorConfig(
                **config["multiband_melgan_generator_params"]
            ),
            dtype=tf.float32,
            name="pqmf",
        )
        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_mb_hat = generator(fake_mels)
        y_hat = pqmf.synthesis(y_mb_hat)
        discriminator(y_hat)
        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        generator.summary()
        discriminator.summary()
        # define optimizer
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])
        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn,
            amsgrad=config["generator_optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn,
            amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
        )
    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
        pqmf=pqmf,
    )
    # start training; a KeyboardInterrupt still saves a resumable checkpoint.
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Script entry point.
if __name__ == "__main__":
    main()
| 18,014 | 33.379771 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/multiband_melgan/decode_mb_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained Mb-Melgan from folder."""
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
from tensorflow_tts.configs import MultiBandMelGANGeneratorConfig
from tensorflow_tts.datasets import MelDataset
from tensorflow_tts.models import TFPQMF, TFMelGANGenerator
def main():
    """Run melgan decoding from folder.

    Loads dumped mel features from --rootdir, runs the trained Multi-Band
    MelGAN generator plus PQMF synthesis, and writes 16-bit PCM wavs to
    --outdir (one file per utterance id).
    """
    parser = argparse.ArgumentParser(
        description="Generate Audio from melspectrogram with trained melgan "
        "(See detail in example/melgan/decode_melgan.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", type=int, default=1, help="Use norm or raw melspectrogram."
    )
    parser.add_argument("--batch-size", type=int, default=8, help="batch_size.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    if config["format"] == "npy":
        # Feature query priority: FastSpeech-predicted mels when decoding a
        # fastspeech dump dir, otherwise normalized vs raw mels per --use-norm.
        mel_query = "*-fs-after-feats.npy" if "fastspeech" in args.rootdir else "*-norm-feats.npy" if args.use_norm == 1 else "*-raw-feats.npy"
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")
    # define data-loader
    dataset = MelDataset(
        root_dir=args.rootdir,
        mel_query=mel_query,
        mel_load_fn=mel_load_fn,
    )
    dataset = dataset.create(batch_size=args.batch_size)
    # define model and load checkpoint
    mb_melgan = TFMelGANGenerator(
        config=MultiBandMelGANGeneratorConfig(**config["multiband_melgan_generator_params"]),
        name="multiband_melgan_generator",
    )
    mb_melgan._build()
    mb_melgan.load_weights(args.checkpoint)
    # PQMF filter bank turns the generator's sub-band output into full-band audio.
    pqmf = TFPQMF(
        config=MultiBandMelGANGeneratorConfig(**config["multiband_melgan_generator_params"]), name="pqmf"
    )
    for data in tqdm(dataset, desc="[Decoding]"):
        utt_ids, mels, mel_lengths = data["utt_ids"], data["mels"], data["mel_lengths"]
        # melgan inference.
        generated_subbands = mb_melgan(mels)
        generated_audios = pqmf.synthesis(generated_subbands)
        # convert to numpy.
        generated_audios = generated_audios.numpy()  # [B, T]
        # save to outdir, trimming batch padding to mel_length * hop_size samples.
        for i, audio in enumerate(generated_audios):
            utt_id = utt_ids[i].numpy().decode("utf-8")
            sf.write(
                os.path.join(args.outdir, f"{utt_id}.wav"),
                audio[: mel_lengths[i].numpy() * config["hop_size"]],
                config["sampling_rate"],
                "PCM_16",
            )
# Script entry point.
if __name__ == "__main__":
    main()
| 4,752 | 31.554795 | 143 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2/decode_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained FastSpeech from folders."""
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
from examples.fastspeech.fastspeech_dataset import CharactorDataset
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
def main():
    """Run fastspeech2 decoding from folder.

    Loads dumped charactor-id files from --rootdir, runs trained FastSpeech2
    inference with neutral speaker/speed/f0/energy controls, and saves the
    predicted before/after mel features as .npy files in --outdir.
    """
    parser = argparse.ArgumentParser(
        description="Decode soft-mel features from charactor with trained FastSpeech "
        "(See detail in examples/fastspeech2/decode_fastspeech2.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--batch-size",
        default=8,
        type=int,
        required=False,
        help="Batch size for inference.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    if config["format"] == "npy":
        char_query = "*-ids.npy"
        char_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")
    # define data-loader
    dataset = CharactorDataset(
        root_dir=args.rootdir,
        charactor_query=char_query,
        charactor_load_fn=char_load_fn,
    )
    dataset = dataset.create(batch_size=args.batch_size)
    # define model and load checkpoint
    fastspeech2 = TFFastSpeech2(
        config=FastSpeech2Config(**config["fastspeech2_params"]), name="fastspeech2"
    )
    fastspeech2._build()
    fastspeech2.load_weights(args.checkpoint)
    for data in tqdm(dataset, desc="Decoding"):
        utt_ids = data["utt_ids"]
        char_ids = data["input_ids"]
        # fastspeech inference with neutral controls: speaker 0, and
        # speed/f0/energy ratios of 1.0 (no modification).
        (
            masked_mel_before,
            masked_mel_after,
            duration_outputs,
            _,
            _,
        ) = fastspeech2.inference(
            char_ids,
            speaker_ids=tf.zeros(shape=[tf.shape(char_ids)[0]], dtype=tf.int32),
            speed_ratios=tf.ones(shape=[tf.shape(char_ids)[0]], dtype=tf.float32),
            f0_ratios=tf.ones(shape=[tf.shape(char_ids)[0]], dtype=tf.float32),
            energy_ratios=tf.ones(shape=[tf.shape(char_ids)[0]], dtype=tf.float32),
        )
        # convert to numpy
        masked_mel_befores = masked_mel_before.numpy()
        masked_mel_afters = masked_mel_after.numpy()
        for (utt_id, mel_before, mel_after, durations) in zip(
            utt_ids, masked_mel_befores, masked_mel_afters, duration_outputs
        ):
            # real len of mel predicted: sum of predicted per-charactor
            # durations; batch padding beyond that is trimmed before saving.
            real_length = durations.numpy().sum()
            utt_id = utt_id.numpy().decode("utf-8")
            # save to folder.
            np.save(
                os.path.join(args.outdir, f"{utt_id}-fs-before-feats.npy"),
                mel_before[:real_length, :].astype(np.float32),
                allow_pickle=False,
            )
            np.save(
                os.path.join(args.outdir, f"{utt_id}-fs-after-feats.npy"),
                mel_after[:real_length, :].astype(np.float32),
                allow_pickle=False,
            )
# Script entry point.
if __name__ == "__main__":
    main()
| 5,326 | 30.708333 | 87 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2/fastspeech2_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset modules."""
import itertools
import logging
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
def average_by_duration(x, durs):
    """Average a frame-level feature (f0/energy) over each charactor span.

    Args:
        x: 1-D frame-level feature array.
        durs: 1-D integer array, number of frames per charactor.

    Returns:
        np.ndarray: float32 per-charactor means; spans containing only zeros
        (or no frames at all) map to 0.0.
    """
    total_frames = durs.sum()
    boundaries = np.cumsum(np.pad(durs, (1, 0)))
    char_means = np.zeros((durs.shape[0],), dtype=np.float32)
    for char_idx, (begin, end) in enumerate(zip(boundaries[:-1], boundaries[1:])):
        if char_idx >= total_frames:
            # mirror the original bound of min(#frames, #charactors) iterations
            break
        segment = x[begin:end]
        voiced = segment[np.where(segment != 0.0)[0]]
        # np.mean of an empty array would be nan, so guard explicitly.
        char_means[char_idx] = np.mean(voiced) if len(voiced) > 0 else 0.0
    return char_means.astype(np.float32)
def tf_average_by_duration(x, durs):
    """Graph-compatible wrapper around ``average_by_duration``.

    Runs the numpy implementation via ``tf.numpy_function`` so it can be used
    inside a ``tf.data`` pipeline; returns a float32 tensor.
    """
    outs = tf.numpy_function(average_by_duration, [x, durs], tf.float32)
    return outs
class CharactorDurationF0EnergyMelDataset(AbstractDataset):
    """Tensorflow Charactor Duration F0 Energy Mel dataset.

    Pairs charactor-id files with their mel, duration, f0 and energy dumps
    (matched by sorted filename order) and exposes them as a padded, batched
    ``tf.data.Dataset`` for FastSpeech2 training.
    """
    def __init__(
        self,
        root_dir,
        charactor_query="*-ids.npy",
        mel_query="*-norm-feats.npy",
        duration_query="*-durations.npy",
        f0_query="*-raw-f0.npy",
        energy_query="*-raw-energy.npy",
        f0_stat="./dump/stats_f0.npy",
        energy_stat="./dump/stats_energy.npy",
        charactor_load_fn=np.load,
        mel_load_fn=np.load,
        duration_load_fn=np.load,
        f0_load_fn=np.load,
        energy_load_fn=np.load,
        mel_length_threshold=0,
    ):
        """Initialize dataset.
        Args:
            root_dir (str): Root directory including dumped files.
            charactor_query (str): Query to find charactor files in root_dir.
            mel_query (str): Query to find feature files in root_dir.
            duration_query (str): Query to find duration files in root_dir.
            f0_query (str): Query to find f0 files in root_dir.
            energy_query (str): Query to find energy files in root_dir.
            f0_stat (str): str path of f0_stat.
            energy_stat (str): str path of energy_stat.
            charactor_load_fn (func): Function to load charactor file.
            mel_load_fn (func): Function to load feature file.
            duration_load_fn (func): Function to load duration file.
            f0_load_fn (func): Function to load f0 file.
            energy_load_fn (func): Function to load energy file.
            mel_length_threshold (int): Threshold to remove short feature files.
        """
        # find all of charactor and mel files.
        charactor_files = sorted(find_files(root_dir, charactor_query))
        mel_files = sorted(find_files(root_dir, mel_query))
        duration_files = sorted(find_files(root_dir, duration_query))
        f0_files = sorted(find_files(root_dir, f0_query))
        energy_files = sorted(find_files(root_dir, energy_query))
        # assert the number of files
        # NOTE(review): the "$" in this f-string is literal, so the message
        # renders as "$<path>" — probably a leftover shell-style template.
        assert len(mel_files) != 0, f"Not found any mels files in ${root_dir}."
        assert (
            len(mel_files)
            == len(charactor_files)
            == len(duration_files)
            == len(f0_files)
            == len(energy_files)
        ), f"Number of charactor, mel, duration, f0 and energy files are different"
        # NOTE(review): if charactor_query is not an .npy pattern, `suffix` and
        # `utt_ids` are never bound and the assignment below raises NameError.
        if ".npy" in charactor_query:
            suffix = charactor_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in charactor_files]
        # set global params
        self.utt_ids = utt_ids
        self.mel_files = mel_files
        self.charactor_files = charactor_files
        self.duration_files = duration_files
        self.f0_files = f0_files
        self.energy_files = energy_files
        self.mel_load_fn = mel_load_fn
        self.charactor_load_fn = charactor_load_fn
        self.duration_load_fn = duration_load_fn
        self.f0_load_fn = f0_load_fn
        self.energy_load_fn = energy_load_fn
        self.mel_length_threshold = mel_length_threshold
        # stats arrays are indexed as [0] -> mean, [1] -> std in _load_data.
        self.f0_stat = np.load(f0_stat)
        self.energy_stat = np.load(energy_stat)
    def get_args(self):
        """Return arguments passed to :meth:`generator` (the utterance ids)."""
        return [self.utt_ids]
    def _norm_mean_std(self, x, mean, std):
        """Mean/std-normalize ``x`` while keeping exact-zero entries at zero."""
        zero_idxs = np.where(x == 0.0)[0]
        x = (x - mean) / std
        x[zero_idxs] = 0.0
        return x
    def _norm_mean_std_tf(self, x, mean, std):
        """Graph-compatible wrapper around :meth:`_norm_mean_std`."""
        x = tf.numpy_function(self._norm_mean_std, [x, mean, std], tf.float32)
        return x
    def generator(self, utt_ids):
        """Yield one dict of dump-file paths (plus the utt id) per utterance."""
        for i, utt_id in enumerate(utt_ids):
            mel_file = self.mel_files[i]
            charactor_file = self.charactor_files[i]
            duration_file = self.duration_files[i]
            f0_file = self.f0_files[i]
            energy_file = self.energy_files[i]
            items = {
                "utt_ids": utt_id,
                "mel_files": mel_file,
                "charactor_files": charactor_file,
                "duration_files": duration_file,
                "f0_files": f0_file,
                "energy_files": energy_file,
            }
            yield items
    @tf.function
    def _load_data(self, items):
        """Load the dumped arrays for one utterance and build the training dict."""
        mel = tf.numpy_function(np.load, [items["mel_files"]], tf.float32)
        charactor = tf.numpy_function(np.load, [items["charactor_files"]], tf.int32)
        duration = tf.numpy_function(np.load, [items["duration_files"]], tf.int32)
        f0 = tf.numpy_function(np.load, [items["f0_files"]], tf.float32)
        energy = tf.numpy_function(np.load, [items["energy_files"]], tf.float32)
        # normalize f0/energy with the precomputed corpus statistics.
        f0 = self._norm_mean_std_tf(f0, self.f0_stat[0], self.f0_stat[1])
        energy = self._norm_mean_std_tf(
            energy, self.energy_stat[0], self.energy_stat[1]
        )
        # calculate charactor f0/energy
        f0 = tf_average_by_duration(f0, duration)
        energy = tf_average_by_duration(energy, duration)
        items = {
            "utt_ids": items["utt_ids"],
            "input_ids": charactor,
            "speaker_ids": 0,  # fixed to 0 — presumably a single-speaker corpus
            "duration_gts": duration,
            "f0_gts": f0,
            "energy_gts": energy,
            "mel_gts": mel,
            "mel_lengths": len(mel),
        }
        return items
    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function."""
        # NOTE(review): the map_fn parameter is accepted but never applied.
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )
        # load data
        datasets = datasets.map(
            lambda items: self._load_data(items), tf.data.experimental.AUTOTUNE
        )
        # drop utterances shorter than the configured mel-length threshold.
        datasets = datasets.filter(
            lambda x: x["mel_lengths"] > self.mel_length_threshold
        )
        if allow_cache:
            datasets = datasets.cache()
        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )
        # define padded shapes
        padded_shapes = {
            "utt_ids": [],
            "input_ids": [None],
            "speaker_ids": [],
            "duration_gts": [None],
            "f0_gts": [None],
            "energy_gts": [None],
            "mel_gts": [None, None],
            "mel_lengths": [],
        }
        datasets = datasets.padded_batch(
            batch_size, padded_shapes=padded_shapes, drop_remainder=True
        )
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets
    def get_output_dtypes(self):
        """Return the dtypes of the items yielded by :meth:`generator`."""
        output_types = {
            "utt_ids": tf.string,
            "mel_files": tf.string,
            "charactor_files": tf.string,
            "duration_files": tf.string,
            "f0_files": tf.string,
            "energy_files": tf.string,
        }
        return output_types
    def get_len_dataset(self):
        """Return the number of utterances found in root_dir."""
        return len(self.utt_ids)
    def __name__(self):
        return "CharactorDurationF0EnergyMelDataset"
| 8,662 | 33.376984 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2/extractfs_postnets.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained FastSpeech from folders."""
import argparse
import logging
import os
import sys
sys.path.append(".")
import numpy as np
import tensorflow as tf
import yaml
from tqdm import tqdm
from examples.fastspeech2.fastspeech2_dataset import CharactorDurationF0EnergyMelDataset
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
def main():
    """Run fastspeech2 decoding from folder.

    Loads a trained FastSpeech2 checkpoint and, for every utterance in
    --rootdir, dumps the post-net mel prediction (teacher-forced on the
    ground-truth durations/f0/energy) to <outdir>/postnets/.
    """
    parser = argparse.ArgumentParser(
        description="Decode soft-mel features from charactor with trained FastSpeech "
        "(See detail in examples/fastspeech2/decode_fastspeech2.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    # NOTE(review): --batch-size is parsed but unused — the loader below
    # forces batch_size=1.
    parser.add_argument(
        "--batch-size",
        default=8,
        type=int,
        required=False,
        help="Batch size for inference.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # load config
    outdpost = os.path.join(args.outdir, "postnets")
    if not os.path.exists(outdpost):
        os.makedirs(outdpost)
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    if config["format"] == "npy":
        char_query = "*-ids.npy"
        char_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")
    # define data-loader
    # NOTE(review): only the charactor query is overridden; mel/duration/f0/
    # energy queries and the ./dump stats paths fall back to dataset defaults.
    dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.rootdir,
        charactor_query=char_query,
        charactor_load_fn=char_load_fn,
    )
    dataset = dataset.create(
        batch_size=1
    ) # force batch size to 1 otherwise it may miss certain files
    # define model and load checkpoint
    fastspeech2 = TFFastSpeech2(
        config=FastSpeech2Config(**config["fastspeech2_params"]), name="fastspeech2"
    )
    fastspeech2._build()
    fastspeech2.load_weights(args.checkpoint)
    # wrap in tf.function to speed up repeated calls with varying shapes.
    fastspeech2 = tf.function(fastspeech2, experimental_relax_shapes=True)
    for data in tqdm(dataset, desc="Decoding"):
        utt_ids = data["utt_ids"]
        char_ids = data["input_ids"]
        mel_lens = data["mel_lengths"]
        # fastspeech inference.
        # training=True with the full batch dict means teacher forcing: the
        # ground-truth durations/f0/energy from the batch drive the decoder.
        masked_mel_before, masked_mel_after, duration_outputs, _, _ = fastspeech2(
            **data, training=True
        )
        # convert to numpy
        masked_mel_befores = masked_mel_before.numpy()
        masked_mel_afters = masked_mel_after.numpy()
        for (utt_id, mel_before, mel_after, durations, mel_len) in zip(
            utt_ids, masked_mel_befores, masked_mel_afters, duration_outputs, mel_lens
        ):
            # real len of mel predicted
            # NOTE(review): real_length is computed but never used — the save
            # below trims with the ground-truth mel_len instead.
            real_length = np.around(durations.numpy().sum()).astype(int)
            utt_id = utt_id.numpy().decode("utf-8")
            np.save(
                os.path.join(outdpost, f"{utt_id}-postnet.npy"),
                mel_after[:mel_len, :].astype(np.float32),
                allow_pickle=False,
            )
if __name__ == "__main__":
    main()
| 5,053 | 30.006135 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/fastspeech2/train_fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train FastSpeech2."""
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
physical_devices = tf.config.list_physical_devices("GPU")
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.fastspeech2.fastspeech2_dataset import CharactorDurationF0EnergyMelDataset
from examples.fastspeech.train_fastspeech import FastSpeechTrainer
from tensorflow_tts.configs import FastSpeech2Config
from tensorflow_tts.models import TFFastSpeech2
from tensorflow_tts.optimizers import AdamWeightDecay, WarmUp
from tensorflow_tts.trainers import Seq2SeqBasedTrainer
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
class FastSpeech2Trainer(Seq2SeqBasedTrainer):
    """FastSpeech2 Trainer class based on FastSpeechTrainer."""
    def __init__(
        self, config, strategy, steps=0, epochs=0, is_mixed_precision=False,
    ):
        """Initialize trainer.
        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy (tf.distribute.Strategy): Distribution strategy for training.
            is_mixed_precision (bool): Use mixed precision or not.
        """
        super(FastSpeech2Trainer, self).__init__(
            steps=steps,
            epochs=epochs,
            config=config,
            strategy=strategy,
            is_mixed_precision=is_mixed_precision,
        )
        # define metrics to aggregates data and use tf.summary logs them
        self.list_metrics_name = [
            "duration_loss",
            "f0_loss",
            "energy_loss",
            "mel_loss_before",
            "mel_loss_after",
        ]
        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()
    def compile(self, model, optimizer):
        """Attach model/optimizer and build the losses used per example."""
        super().compile(model, optimizer)
        # Reduction.NONE keeps per-example losses so the trainer can reduce
        # them itself across replicas.
        self.mse = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )
    def compute_per_example_losses(self, batch, outputs):
        """Compute per example losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.
        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model
        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs = outputs
        # the duration target is compared in log domain; +1 guards log(0).
        log_duration = tf.math.log(
            tf.cast(tf.math.add(batch["duration_gts"], 1), tf.float32)
        )
        duration_loss = calculate_2d_loss(log_duration, duration_outputs, self.mse)
        f0_loss = calculate_2d_loss(batch["f0_gts"], f0_outputs, self.mse)
        energy_loss = calculate_2d_loss(batch["energy_gts"], energy_outputs, self.mse)
        mel_loss_before = calculate_3d_loss(batch["mel_gts"], mel_before, self.mae)
        mel_loss_after = calculate_3d_loss(batch["mel_gts"], mel_after, self.mae)
        # total loss is an unweighted sum of all five terms.
        per_example_losses = (
            duration_loss + f0_loss + energy_loss + mel_loss_before + mel_loss_after
        )
        dict_metrics_losses = {
            "duration_loss": duration_loss,
            "f0_loss": f0_loss,
            "energy_loss": energy_loss,
            "mel_loss_before": mel_loss_before,
            "mel_loss_after": mel_loss_after,
        }
        return per_example_losses, dict_metrics_losses
    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        import matplotlib.pyplot as plt
        # predict with tf.function.
        outputs = self.one_step_predict(batch)
        mels_before, mels_after, *_ = outputs
        mel_gts = batch["mel_gts"]
        utt_ids = batch["utt_ids"]
        # convert to tensor.
        # here we just take a sample at first replica.
        try:
            mels_before = mels_before.values[0].numpy()
            mels_after = mels_after.values[0].numpy()
            mel_gts = mel_gts.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            # not running under a multi-replica strategy: tensors have no .values.
            mels_before = mels_before.numpy()
            mels_after = mels_after.numpy()
            mel_gts = mel_gts.numpy()
            utt_ids = utt_ids.numpy()
        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        for idx, (mel_gt, mel_before, mel_after) in enumerate(
            zip(mel_gts, mels_before, mels_after), 0
        ):
            mel_gt = tf.reshape(mel_gt, (-1, 80)).numpy()  # [length, 80]
            mel_before = tf.reshape(mel_before, (-1, 80)).numpy()  # [length, 80]
            mel_after = tf.reshape(mel_after, (-1, 80)).numpy()  # [length, 80]
            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            fig = plt.figure(figsize=(10, 8))
            ax1 = fig.add_subplot(311)
            ax2 = fig.add_subplot(312)
            ax3 = fig.add_subplot(313)
            im = ax1.imshow(np.rot90(mel_gt), aspect="auto", interpolation="none")
            ax1.set_title("Target Mel-Spectrogram")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax1)
            ax2.set_title("Predicted Mel-before-Spectrogram")
            im = ax2.imshow(np.rot90(mel_before), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax2)
            ax3.set_title("Predicted Mel-after-Spectrogram")
            im = ax3.imshow(np.rot90(mel_after), aspect="auto", interpolation="none")
            fig.colorbar(mappable=im, shrink=0.65, orientation="horizontal", ax=ax3)
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()
def main():
    """Run training process.

    Parses CLI args, builds train/valid tf.data pipelines, constructs the
    FastSpeech2 model and AdamW optimizer under the distribution strategy,
    then hands everything to FastSpeech2Trainer.fit.
    """
    parser = argparse.ArgumentParser(
        description="Train FastSpeech (See detail in tensorflow_tts/bin/train-fastspeech.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="usr norm-mels for train or raw."
    )
    parser.add_argument(
        "--f0-stat",
        default="./dump/stats_f0.npy",
        type=str,
        required=True,
        help="f0-stat path.",
    )
    parser.add_argument(
        "--energy-stat",
        default="./dump/stats_energy.npy",
        type=str,
        required=True,
        help="energy-stat path.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="pretrained weights .h5 file to load weights from. Auto-skips non-matching layers",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.mixed_precision = bool(args.mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # NOTE(review): message mentions --valid-dir but the flag is --dev-dir.
        raise ValueError("Please specify --valid-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset
    if config["remove_short_samples"]:
        mel_length_threshold = config["mel_length_threshold"]
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        charactor_query = "*-ids.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        duration_query = "*-durations.npy"
        f0_query = "*-raw-f0.npy"
        energy_query = "*-raw-energy.npy"
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.train_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and gradient-accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = CharactorDurationF0EnergyMelDataset(
        root_dir=args.dev_dir,
        charactor_query=charactor_query,
        mel_query=mel_query,
        duration_query=duration_query,
        f0_query=f0_query,
        energy_query=energy_query,
        f0_stat=args.f0_stat,
        energy_stat=args.energy_stat,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = FastSpeech2Trainer(
        config=config,
        strategy=STRATEGY,
        steps=0,
        epochs=0,
        is_mixed_precision=args.mixed_precision,
    )
    with STRATEGY.scope():
        # define model
        fastspeech = TFFastSpeech2(
            config=FastSpeech2Config(**config["fastspeech2_params"])
        )
        fastspeech._build()
        fastspeech.summary()
        if len(args.pretrained) > 1:
            fastspeech.load_weights(args.pretrained, by_name=True, skip_mismatch=True)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        # AdamW for fastspeech
        learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_steps=config["optimizer_params"]["decay_steps"],
            end_learning_rate=config["optimizer_params"]["end_learning_rate"],
        )
        # wrap the polynomial decay with a linear warmup phase.
        learning_rate_fn = WarmUp(
            initial_learning_rate=config["optimizer_params"]["initial_learning_rate"],
            decay_schedule_fn=learning_rate_fn,
            warmup_steps=int(
                config["train_max_steps"]
                * config["optimizer_params"]["warmup_proportion"]
            ),
        )
        optimizer = AdamWeightDecay(
            learning_rate=learning_rate_fn,
            weight_decay_rate=config["optimizer_params"]["weight_decay"],
            beta_1=0.9,
            beta_2=0.98,
            epsilon=1e-6,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
        )
        # touch the iteration counter so its variable is created here —
        # presumably required before compile/restore; TODO confirm.
        _ = optimizer.iterations
        # compile trainer
        trainer.compile(model=fastspeech, optimizer=optimizer)
    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        # save a resumable checkpoint when interrupted with Ctrl-C.
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
    main()
| 14,446 | 33.562201 | 96 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/hifigan/train_hifigan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train Hifigan."""
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
physical_devices = tf.config.list_physical_devices("GPU")
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import collater
from examples.melgan_stft.train_melgan_stft import MultiSTFTMelganTrainer
from tensorflow_tts.configs import (
HifiGANDiscriminatorConfig,
HifiGANGeneratorConfig,
MelGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFHifiGANGenerator,
TFHifiGANMultiPeriodDiscriminator,
TFMelGANMultiScaleDiscriminator,
)
from tensorflow_tts.utils import return_strategy
class TFHifiGANDiscriminator(tf.keras.Model):
    """HiFi-GAN discriminator combining the multi-period and multi-scale banks."""
    def __init__(self, multiperiod_dis, multiscale_dis, **kwargs):
        """Keep references to the two sub-discriminators."""
        super().__init__(**kwargs)
        self.multiperiod_dis = multiperiod_dis
        self.multiscale_dis = multiscale_dis
    def call(self, x):
        """Run both banks on ``x`` and return their outputs as one flat list."""
        return [*self.multiperiod_dis(x), *self.multiscale_dis(x)]
def main():
    """Run training process.

    Parses CLI args, builds train/valid audio+mel pipelines, constructs the
    HiFi-GAN generator and combined discriminator under the distribution
    strategy, then hands everything to MultiSTFTMelganTrainer.fit.
    """
    parser = argparse.ArgumentParser(
        description="Train Hifigan (See detail in examples/hifigan/train_hifigan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    parser.add_argument(
        "--pretrained",
        default="",
        type=str,
        nargs="?",
        help="path of .h5 melgan generator to load weights from",
    )
    args = parser.parse_args()
    # return strategy
    STRATEGY = return_strategy()
    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})
    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)
    args.use_norm = bool(args.use_norm)
    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")
    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)
    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # NOTE(review): message mentions --valid-dir but the flag is --dev-dir.
        raise ValueError("Please specify either --valid-dir")
    # load and save config
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")
    # get dataset
    if config["remove_short_samples"]:
        # keep only clips long enough for one training window (+ aux context).
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["hifigan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None
    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")
    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        # collater crops random fixed-length audio/mel windows per example.
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch covers all replicas and gradient-accumulation steps.
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )
    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )
    # define trainer
    trainer = MultiSTFTMelganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )
    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFHifiGANGenerator(
            HifiGANGeneratorConfig(**config["hifigan_generator_params"]),
            name="hifigan_generator",
        )
        multiperiod_discriminator = TFHifiGANMultiPeriodDiscriminator(
            HifiGANDiscriminatorConfig(**config["hifigan_discriminator_params"]),
            name="hifigan_multiperiod_discriminator",
        )
        # NOTE(review): here `name=` lands inside MelGANDiscriminatorConfig(...)
        # rather than on the model constructor — looks like a misplaced closing
        # paren; confirm the config tolerates/forwards the kwarg.
        multiscale_discriminator = TFMelGANMultiScaleDiscriminator(
            MelGANDiscriminatorConfig(
                **config["melgan_discriminator_params"],
                name="melgan_multiscale_discriminator",
            )
        )
        discriminator = TFHifiGANDiscriminator(
            multiperiod_discriminator,
            multiscale_discriminator,
            name="hifigan_discriminator",
        )
        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)
        if len(args.pretrained) > 1:
            generator.load_weights(args.pretrained)
            logging.info(
                f"Successfully loaded pretrained weight from {args.pretrained}."
            )
        generator.summary()
        discriminator.summary()
        # define optimizer
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])
        gen_optimizer = tf.keras.optimizers.Adam(
            learning_rate=generator_lr_fn,
            amsgrad=config["generator_optimizer_params"]["amsgrad"],
        )
        dis_optimizer = tf.keras.optimizers.Adam(
            learning_rate=discriminator_lr_fn,
            amsgrad=config["discriminator_optimizer_params"]["amsgrad"],
        )
        trainer.compile(
            gen_model=generator,
            dis_model=discriminator,
            gen_optimizer=gen_optimizer,
            dis_optimizer=dis_optimizer,
        )
    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        # save a resumable checkpoint when interrupted with Ctrl-C.
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
if __name__ == "__main__":
    main()
TensorFlowTTS | TensorFlowTTS-master/examples/parallel_wavegan/train_parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train ParallelWavegan."""
import tensorflow as tf
# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
physical_devices = tf.config.list_physical_devices("GPU")
for device in physical_devices:
    tf.config.experimental.set_memory_growth(device, True)
import sys
sys.path.append(".")
import argparse
import logging
import os
import soundfile as sf
import numpy as np
import yaml
import tensorflow_tts
from examples.melgan.audio_mel_dataset import AudioMelDataset
from examples.melgan.train_melgan import collater
from tensorflow_tts.configs import (
ParallelWaveGANGeneratorConfig,
ParallelWaveGANDiscriminatorConfig,
)
from tensorflow_tts.models import (
TFParallelWaveGANGenerator,
TFParallelWaveGANDiscriminator,
)
from tensorflow_tts.trainers import GanBasedTrainer
from tensorflow_tts.losses import TFMultiResolutionSTFT
from tensorflow_tts.utils import calculate_2d_loss, calculate_3d_loss, return_strategy
from tensorflow_addons.optimizers import RectifiedAdam
class ParallelWaveganTrainer(GanBasedTrainer):
    """ParallelWaveGAN Trainer class based on GanBasedTrainer."""

    def __init__(
        self,
        config,
        strategy,
        steps=0,
        epochs=0,
        is_generator_mixed_precision=False,
        is_discriminator_mixed_precision=False,
    ):
        """Initialize trainer.

        Args:
            steps (int): Initial global steps.
            epochs (int): Initial global epochs.
            config (dict): Config dict loaded from yaml format configuration file.
            strategy: tf.distribute strategy the training runs under.
            is_generator_mixed_precision (bool): Use mixed precision for generator or not.
            is_discriminator_mixed_precision (bool): Use mixed precision for discriminator or not.
        """
        super(ParallelWaveganTrainer, self).__init__(
            config=config,
            steps=steps,
            epochs=epochs,
            strategy=strategy,
            is_generator_mixed_precision=is_generator_mixed_precision,
            is_discriminator_mixed_precision=is_discriminator_mixed_precision,
        )

        # Names of the scalar metrics tracked during both train and eval; the
        # keys returned by the compute_per_example_*_losses methods below must
        # be drawn from this list.
        self.list_metrics_name = [
            "adversarial_loss",
            "gen_loss",
            "real_loss",
            "fake_loss",
            "dis_loss",
            "spectral_convergence_loss",
            "log_magnitude_loss",
        ]

        self.init_train_eval_metrics(self.list_metrics_name)
        self.reset_states_train()
        self.reset_states_eval()

    def compile(self, gen_model, dis_model, gen_optimizer, dis_optimizer):
        """Attach models/optimizers and construct the loss functions."""
        super().compile(gen_model, dis_model, gen_optimizer, dis_optimizer)
        # define loss
        self.stft_loss = TFMultiResolutionSTFT(**self.config["stft_loss_params"])
        # Reduction.NONE keeps per-example losses so the distribute strategy
        # can do the cross-replica averaging itself.
        self.mse_loss = tf.keras.losses.MeanSquaredError(
            reduction=tf.keras.losses.Reduction.NONE
        )
        self.mae_loss = tf.keras.losses.MeanAbsoluteError(
            reduction=tf.keras.losses.Reduction.NONE
        )

    def compute_per_example_generator_losses(self, batch, outputs):
        """Compute per example generator losses and return dict_metrics_losses
        Note that all element of the loss MUST has a shape [batch_size] and
        the keys of dict_metrics_losses MUST be in self.list_metrics_name.

        Args:
            batch: dictionary batch input return from dataloader
            outputs: outputs of the model

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        dict_metrics_losses = {}
        per_example_losses = 0.0

        audios = batch["audios"]
        y_hat = outputs

        # calculate multi-resolution stft loss
        sc_loss, mag_loss = calculate_2d_loss(
            audios, tf.squeeze(y_hat, -1), self.stft_loss
        )

        # equally weight the two STFT loss terms
        gen_loss = 0.5 * (sc_loss + mag_loss)

        # The adversarial term only kicks in after the discriminator has
        # started training (warm-up controlled by config).
        if self.steps >= self.config["discriminator_train_start_steps"]:
            p_hat = self._discriminator(y_hat)
            p = self._discriminator(tf.expand_dims(audios, 2))
            adv_loss = 0.0
            adv_loss += calculate_3d_loss(
                tf.ones_like(p_hat), p_hat, loss_fn=self.mse_loss
            )
            gen_loss += self.config["lambda_adv"] * adv_loss

            # update dict_metrics_losses
            dict_metrics_losses.update({"adversarial_loss": adv_loss})

        dict_metrics_losses.update({"gen_loss": gen_loss})
        dict_metrics_losses.update({"spectral_convergence_loss": sc_loss})
        dict_metrics_losses.update({"log_magnitude_loss": mag_loss})

        per_example_losses = gen_loss
        return per_example_losses, dict_metrics_losses

    def compute_per_example_discriminator_losses(self, batch, gen_outputs):
        """Compute per-example discriminator (LSGAN) losses.

        Args:
            batch: dictionary batch input return from dataloader
            gen_outputs: generated waveform from the generator, [B, T, 1]

        Returns:
            per_example_losses: per example losses for each GPU, shape [B]
            dict_metrics_losses: dictionary loss.
        """
        audios = batch["audios"]
        y_hat = gen_outputs

        y = tf.expand_dims(audios, 2)
        p = self._discriminator(y)
        p_hat = self._discriminator(y_hat)

        real_loss = 0.0
        fake_loss = 0.0

        # least-squares GAN objective: real -> 1, fake -> 0
        real_loss += calculate_3d_loss(tf.ones_like(p), p, loss_fn=self.mse_loss)
        fake_loss += calculate_3d_loss(
            tf.zeros_like(p_hat), p_hat, loss_fn=self.mse_loss
        )

        dis_loss = real_loss + fake_loss

        # calculate per_example_losses and dict_metrics_losses
        per_example_losses = dis_loss

        dict_metrics_losses = {
            "real_loss": real_loss,
            "fake_loss": fake_loss,
            "dis_loss": dis_loss,
        }

        return per_example_losses, dict_metrics_losses

    def generate_and_save_intermediate_result(self, batch):
        """Generate and save intermediate result."""
        import matplotlib.pyplot as plt

        # generate
        y_batch_ = self.one_step_predict(batch)
        y_batch = batch["audios"]
        utt_ids = batch["utt_ids"]

        # convert to tensor.
        # here we just take a sample at first replica.
        # NOTE(review): under a multi-replica strategy the outputs appear to
        # be PerReplica objects exposing `.values`; the except branch handles
        # the single-device case where plain tensors have no `.values`.
        try:
            y_batch_ = y_batch_.values[0].numpy()
            y_batch = y_batch.values[0].numpy()
            utt_ids = utt_ids.values[0].numpy()
        except Exception:
            y_batch_ = y_batch_.numpy()
            y_batch = y_batch.numpy()
            utt_ids = utt_ids.numpy()

        # check directory
        dirname = os.path.join(self.config["outdir"], f"predictions/{self.steps}steps")
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        for idx, (y, y_) in enumerate(zip(y_batch, y_batch_), 0):
            # convert to ndarray
            y, y_ = tf.reshape(y, [-1]).numpy(), tf.reshape(y_, [-1]).numpy()

            # plot figure and save it
            utt_id = utt_ids[idx]
            figname = os.path.join(dirname, f"{utt_id}.png")
            plt.subplot(2, 1, 1)
            plt.plot(y)
            plt.title("groundtruth speech")
            plt.subplot(2, 1, 2)
            plt.plot(y_)
            plt.title(f"generated speech @ {self.steps} steps")
            plt.tight_layout()
            plt.savefig(figname)
            plt.close()

            # save as wavefile; clip to the valid [-1, 1] PCM range first
            y = np.clip(y, -1, 1)
            y_ = np.clip(y_, -1, 1)
            sf.write(
                figname.replace(".png", "_ref.wav"),
                y,
                self.config["sampling_rate"],
                "PCM_16",
            )
            sf.write(
                figname.replace(".png", "_gen.wav"),
                y_,
                self.config["sampling_rate"],
                "PCM_16",
            )
def main():
    """Run training process.

    Parses CLI arguments, loads the yaml config, builds train/valid datasets,
    constructs the ParallelWaveGAN generator/discriminator under the
    distribute strategy, then runs the GAN training loop. A KeyboardInterrupt
    saves a checkpoint before exiting.
    """
    parser = argparse.ArgumentParser(
        description="Train ParallelWaveGan (See detail in tensorflow_tts/examples/parallel_wavegan/train_parallel_wavegan.py)"
    )
    parser.add_argument(
        "--train-dir",
        default=None,
        type=str,
        help="directory including training data. ",
    )
    parser.add_argument(
        "--dev-dir",
        default=None,
        type=str,
        help="directory including development data. ",
    )
    parser.add_argument(
        "--use-norm", default=1, type=int, help="use norm mels for training or raw."
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save checkpoints."
    )
    parser.add_argument(
        "--config", type=str, required=True, help="yaml format configuration file."
    )
    parser.add_argument(
        "--resume",
        default="",
        type=str,
        nargs="?",
        help='checkpoint file path to resume training. (default="")',
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    parser.add_argument(
        "--generator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for generator or not.",
    )
    parser.add_argument(
        "--discriminator_mixed_precision",
        default=0,
        type=int,
        help="using mixed precision for discriminator or not.",
    )
    args = parser.parse_args()

    # return strategy
    STRATEGY = return_strategy()

    # set mixed precision config
    if args.generator_mixed_precision == 1 or args.discriminator_mixed_precision == 1:
        tf.config.optimizer.set_experimental_options({"auto_mixed_precision": True})

    args.generator_mixed_precision = bool(args.generator_mixed_precision)
    args.discriminator_mixed_precision = bool(args.discriminator_mixed_precision)

    args.use_norm = bool(args.use_norm)

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            stream=sys.stdout,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # check arguments
    if args.train_dir is None:
        raise ValueError("Please specify --train-dir")
    if args.dev_dir is None:
        # fixed: message previously referenced a non-existent --valid-dir flag
        raise ValueError("Please specify --dev-dir")

    # load config, merge CLI args into it, and save the effective config
    # alongside the checkpoints for reproducibility.
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))
    config["version"] = tensorflow_tts.__version__
    with open(os.path.join(args.outdir, "config.yml"), "w") as f:
        yaml.dump(config, f, Dumper=yaml.Dumper)
    for key, value in config.items():
        logging.info(f"{key} = {value}")

    # get dataset
    if config["remove_short_samples"]:
        # minimum number of mel frames needed to crop one training window,
        # padded by the generator's auxiliary context window on both sides
        mel_length_threshold = config["batch_max_steps"] // config[
            "hop_size"
        ] + 2 * config["parallel_wavegan_generator_params"].get("aux_context_window", 0)
    else:
        mel_length_threshold = None

    if config["format"] == "npy":
        audio_query = "*-wave.npy"
        mel_query = "*-raw-feats.npy" if args.use_norm is False else "*-norm-feats.npy"
        audio_load_fn = np.load
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy are supported.")

    # define train/valid dataset
    train_dataset = AudioMelDataset(
        root_dir=args.train_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(config["batch_max_steps"], dtype=tf.int32),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        # global batch = per-replica batch * replicas * accumulation steps
        batch_size=config["batch_size"]
        * STRATEGY.num_replicas_in_sync
        * config["gradient_accumulation_steps"],
    )

    valid_dataset = AudioMelDataset(
        root_dir=args.dev_dir,
        audio_query=audio_query,
        mel_query=mel_query,
        audio_load_fn=audio_load_fn,
        mel_load_fn=mel_load_fn,
        mel_length_threshold=mel_length_threshold,
    ).create(
        is_shuffle=config["is_shuffle"],
        map_fn=lambda items: collater(
            items,
            batch_max_steps=tf.constant(
                config["batch_max_steps_valid"], dtype=tf.int32
            ),
            hop_size=tf.constant(config["hop_size"], dtype=tf.int32),
        ),
        allow_cache=config["allow_cache"],
        batch_size=config["batch_size"] * STRATEGY.num_replicas_in_sync,
    )

    # define trainer
    trainer = ParallelWaveganTrainer(
        steps=0,
        epochs=0,
        config=config,
        strategy=STRATEGY,
        is_generator_mixed_precision=args.generator_mixed_precision,
        is_discriminator_mixed_precision=args.discriminator_mixed_precision,
    )

    with STRATEGY.scope():
        # define generator and discriminator
        generator = TFParallelWaveGANGenerator(
            ParallelWaveGANGeneratorConfig(
                **config["parallel_wavegan_generator_params"]
            ),
            name="parallel_wavegan_generator",
        )
        discriminator = TFParallelWaveGANDiscriminator(
            ParallelWaveGANDiscriminatorConfig(
                **config["parallel_wavegan_discriminator_params"]
            ),
            name="parallel_wavegan_discriminator",
        )

        # dummy input to build model.
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        y_hat = generator(fake_mels)
        discriminator(y_hat)

        generator.summary()
        discriminator.summary()

        # define optimizer with the learning-rate schedule named in config
        generator_lr_fn = getattr(
            tf.keras.optimizers.schedules, config["generator_optimizer_params"]["lr_fn"]
        )(**config["generator_optimizer_params"]["lr_params"])
        discriminator_lr_fn = getattr(
            tf.keras.optimizers.schedules,
            config["discriminator_optimizer_params"]["lr_fn"],
        )(**config["discriminator_optimizer_params"]["lr_params"])

        gen_optimizer = RectifiedAdam(learning_rate=generator_lr_fn, amsgrad=False)
        dis_optimizer = RectifiedAdam(learning_rate=discriminator_lr_fn, amsgrad=False)

    trainer.compile(
        gen_model=generator,
        dis_model=discriminator,
        gen_optimizer=gen_optimizer,
        dis_optimizer=dis_optimizer,
    )

    # start training
    try:
        trainer.fit(
            train_dataset,
            valid_dataset,
            saved_path=os.path.join(config["outdir"], "checkpoints/"),
            resume=args.resume,
        )
    except KeyboardInterrupt:
        trainer.save_checkpoint()
        logging.info(f"Successfully saved checkpoint @ {trainer.steps}steps.")
# Standard script entry point: run training only when executed directly.
if __name__ == "__main__":
    main()
| 15,699 | 32.052632 | 126 | py |
TensorFlowTTS | TensorFlowTTS-master/examples/parallel_wavegan/decode_parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decode trained Mb-Melgan from folder."""
import argparse
import logging
import os
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
from tensorflow_tts.configs import ParallelWaveGANGeneratorConfig
from tensorflow_tts.datasets import MelDataset
from tensorflow_tts.models import TFParallelWaveGANGenerator
def main():
    """Run parallel_wavegan decoding from folder.

    Loads mel features from --rootdir, restores a trained ParallelWaveGAN
    generator from --checkpoint, and writes one 16-bit PCM wav per utterance
    into --outdir.
    """
    parser = argparse.ArgumentParser(
        description="Generate Audio from melspectrogram with trained melgan "
        "(See detail in examples/parallel_wavegan/decode_parallel_wavegan.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="directory including ids/durations files.",
    )
    parser.add_argument(
        "--outdir", type=str, required=True, help="directory to save generated speech."
    )
    parser.add_argument(
        "--checkpoint", type=str, required=True, help="checkpoint file to be loaded."
    )
    parser.add_argument(
        "--use-norm", type=int, default=1, help="Use norm or raw melspectrogram."
    )
    parser.add_argument("--batch-size", type=int, default=8, help="batch_size.")
    parser.add_argument(
        "--config",
        default=None,
        type=str,
        required=True,
        help="yaml format configuration file. if not explicitly provided, "
        "it will be searched in the checkpoint directory. (default=None)",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=1,
        help="logging level. higher is more logging. (default=1)",
    )
    args = parser.parse_args()

    # set logger verbosity from --verbose
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
    else:
        logging.basicConfig(
            level=logging.WARN,
            format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s",
        )
        logging.warning("Skip DEBUG/INFO messages")

    # check directory existence
    if not os.path.exists(args.outdir):
        os.makedirs(args.outdir)

    # load config and overlay CLI arguments on top of it
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    if config["format"] == "npy":
        # fastspeech-synthesized features use their own naming; otherwise pick
        # normalized vs raw mels according to --use-norm.
        mel_query = "*-fs-after-feats.npy" if "fastspeech" in args.rootdir else "*-norm-feats.npy" if args.use_norm == 1 else "*-raw-feats.npy"
        mel_load_fn = np.load
    else:
        raise ValueError("Only npy is supported.")

    # define data-loader
    dataset = MelDataset(
        root_dir=args.rootdir,
        mel_query=mel_query,
        mel_load_fn=mel_load_fn,
    )
    dataset = dataset.create(batch_size=args.batch_size)

    # define model and load checkpoint
    parallel_wavegan = TFParallelWaveGANGenerator(
        config=ParallelWaveGANGeneratorConfig(**config["parallel_wavegan_generator_params"]),
        name="parallel_wavegan_generator",
    )
    parallel_wavegan._build()  # build variables with a dummy input before loading weights
    parallel_wavegan.load_weights(args.checkpoint)

    for data in tqdm(dataset, desc="[Decoding]"):
        utt_ids, mels, mel_lengths = data["utt_ids"], data["mels"], data["mel_lengths"]

        # pwgan inference.
        generated_audios = parallel_wavegan.inference(mels)

        # convert to numpy.
        generated_audios = generated_audios.numpy()  # [B, T]

        # save to outdir, trimming padding: each clip is mel_length * hop_size samples
        for i, audio in enumerate(generated_audios):
            utt_id = utt_ids[i].numpy().decode("utf-8")
            sf.write(
                os.path.join(args.outdir, f"{utt_id}.wav"),
                audio[: mel_lengths[i].numpy() * config["hop_size"]],
                config["sampling_rate"],
                "PCM_16",
            )
# Standard script entry point: run decoding only when executed directly.
if __name__ == "__main__":
    main()
| 4,634 | 31.87234 | 143 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/__init__.py | __version__ = "0.0"
| 20 | 9.5 | 19 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/base_model.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Model for all model."""
import tensorflow as tf
import yaml
import os
import numpy as np
from tensorflow_tts.utils.utils import MODEL_FILE_NAME, CONFIG_FILE_NAME
class BaseModel(tf.keras.Model):
    """Common base class for TensorFlowTTS models (config + weight saving)."""

    def set_config(self, config):
        # Keep a reference to the model config so it can be serialized next to
        # the weights in save_pretrained().
        self.config = config

    def save_pretrained(self, saved_path):
        """Save config and weights to file"""
        os.makedirs(saved_path, exist_ok=True)
        self.config.save_pretrained(saved_path)
        self.save_weights(os.path.join(saved_path, MODEL_FILE_NAME))
| 1,131 | 32.294118 | 74 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The TensorFlowTTS Team and Tomoki Hayashi (@kan-bayashi)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Parallel-wavegan Modules. Based on pytorch implementation (https://github.com/kan-bayashi/ParallelWaveGAN)"""
import tensorflow as tf
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_seed=42):
    """Build a seeded He-normal kernel initializer.

    Args:
        initializer_seed: int, seed used to make initialization reproducible.

    Returns:
        A ``tf.keras.initializers.he_normal`` instance seeded with
        ``initializer_seed``.
    """
    initializer = tf.keras.initializers.he_normal(seed=initializer_seed)
    return initializer
class TFConv1d1x1(tf.keras.layers.Conv1D):
    """1x1 Conv1d with customized initialization.

    A pointwise (kernel_size=1, stride=1, dilation=1) convolution whose kernel
    is He-normal initialized with a caller-supplied seed.
    """

    def __init__(self, filters, use_bias, padding, initializer_seed, **kwargs):
        """Initialize 1x1 Conv1d module.

        Args:
            filters (int): number of output channels.
            use_bias (bool): whether to add a bias term.
            padding (str): padding mode forwarded to Conv1D.
            initializer_seed (int): seed for the He-normal kernel initializer.
        """
        super().__init__(
            filters=filters,
            kernel_size=1,
            strides=1,
            padding=padding,
            dilation_rate=1,
            use_bias=use_bias,
            kernel_initializer=get_initializer(initializer_seed),
            **kwargs,
        )
class TFConv1d(tf.keras.layers.Conv1D):
    """Conv1d with customized initialization.

    Identical to ``tf.keras.layers.Conv1D`` except the kernel initializer is
    a seeded He-normal; the seed is taken from the ``initializer_seed``
    keyword argument (default 42) and removed from kwargs before forwarding.
    """

    def __init__(self, *args, **kwargs):
        """Initialize Conv1d module."""
        initializer_seed = kwargs.pop("initializer_seed", 42)
        super().__init__(
            *args, **kwargs, kernel_initializer=get_initializer(initializer_seed)
        )
class TFResidualBlock(tf.keras.layers.Layer):
    """Residual block module in WaveNet."""

    def __init__(
        self,
        kernel_size=3,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        dropout_rate=0.0,
        dilation_rate=1,
        use_bias=True,
        use_causal_conv=False,
        initializer_seed=42,
        **kwargs,
    ):
        """Initialize ResidualBlock module.

        Args:
            kernel_size (int): Kernel size of dilation convolution layer.
            residual_channels (int): Number of channels for residual connection.
            gate_channels (int): Number of channels for the gated activation
                (split in half into tanh/sigmoid branches).
            skip_channels (int): Number of channels for skip connection.
            aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
            dropout_rate (float): Dropout probability.
            dilation_rate (int): Dilation factor.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            use_causal_conv (bool): Whether to use use_causal_conv or non-use_causal_conv convolution.
            initializer_seed (int32): initializer seed.
        """
        super().__init__(**kwargs)
        self.dropout_rate = dropout_rate
        # no future time stamps available
        self.use_causal_conv = use_causal_conv

        # dilation conv; "causal" padding keeps the layer from seeing future frames
        self.conv = TFConv1d(
            filters=gate_channels,
            kernel_size=kernel_size,
            padding="same" if self.use_causal_conv is False else "causal",
            strides=1,
            dilation_rate=dilation_rate,
            use_bias=use_bias,
            initializer_seed=initializer_seed,
        )

        # local conditioning
        if aux_channels > 0:
            self.conv1x1_aux = TFConv1d1x1(
                gate_channels,
                use_bias=False,
                padding="same",
                initializer_seed=initializer_seed,
                name="conv1x1_aux",
            )
        else:
            self.conv1x1_aux = None

        # conv output is split into two groups
        gate_out_channels = gate_channels // 2
        self.conv1x1_out = TFConv1d1x1(
            residual_channels,
            use_bias=use_bias,
            padding="same",
            initializer_seed=initializer_seed,
            name="conv1x1_out",
        )
        self.conv1x1_skip = TFConv1d1x1(
            skip_channels,
            use_bias=use_bias,
            padding="same",
            initializer_seed=initializer_seed,
            name="conv1x1_skip",
        )
        self.dropout = tf.keras.layers.Dropout(rate=self.dropout_rate)

    def call(self, x, c, training=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, residual_channels, T).
            c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).

        Returns:
            Tensor: Output tensor for residual connection (B, T, residual_channels).
            Tensor: Output tensor for skip connection (B, T, skip_channels).
        """
        residual = x
        x = self.dropout(x, training=training)
        x = self.conv(x)

        # split into two part for gated activation
        xa, xb = tf.split(x, 2, axis=-1)

        # local conditioning: project c and add it into both gate branches
        if c is not None:
            assert self.conv1x1_aux is not None
            c = self.conv1x1_aux(c)
            ca, cb = tf.split(c, 2, axis=-1)
            xa, xb = xa + ca, xb + cb

        # WaveNet gated activation unit: tanh(filter) * sigmoid(gate)
        x = tf.nn.tanh(xa) * tf.nn.sigmoid(xb)

        # for skip connection
        s = self.conv1x1_skip(x)

        # for residual connection; sqrt(0.5) keeps the output variance stable
        x = self.conv1x1_out(x)
        x = (x + residual) * tf.math.sqrt(0.5)

        return x, s
class TFStretch1d(tf.keras.layers.Layer):
    """Stretch2d module: rescales the time/frequency axes by interpolation."""

    def __init__(self, x_scale, y_scale, method="nearest", **kwargs):
        """Initialize Stretch2d module.

        Args:
            x_scale (int): X scaling factor (Time axis in spectrogram).
            y_scale (int): Y scaling factor (Frequency axis in spectrogram).
            method (str): Interpolation method.
        """
        super().__init__(**kwargs)
        self.x_scale = x_scale
        self.y_scale = y_scale
        self.method = method

    def call(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, T, C, 1).

        Returns:
            Tensor: Interpolated tensor (B, T * x_scale, C * y_scale, 1)
        """
        input_shape = tf.shape(x)
        stretched_t = input_shape[1] * self.x_scale
        stretched_c = input_shape[2] * self.y_scale
        return tf.image.resize(x, size=(stretched_t, stretched_c), method=self.method)
class TFUpsampleNetWork(tf.keras.layers.Layer):
    """Upsampling network module."""

    def __init__(
        self,
        output_channels,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        use_causal_conv=False,
        **kwargs,
    ):
        """Initialize upsampling network module.

        Args:
            output_channels (int): output feature channels.
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            use_causal_conv (bool): Whether to request causal convolutions.
        """
        super().__init__(**kwargs)
        self.use_causal_conv = use_causal_conv
        self.up_layers = []
        # One (stretch, conv[, activation]) group per upsampling scale.
        for scale in upsample_scales:
            # interpolation layer
            stretch = TFStretch1d(
                scale, 1, interpolate_mode, name="stretch_._{}".format(scale)
            )  # ->> outputs: [B, T * scale, C * 1, 1]
            self.up_layers += [stretch]

            # conv layer smooths the interpolated features along time/frequency
            assert (
                freq_axis_kernel_size - 1
            ) % 2 == 0, "Not support even number freq axis kernel size."
            kernel_size = scale * 2 + 1
            # NOTE(review): Keras Conv2D only accepts "valid"/"same" padding;
            # the "causal" branch here likely raises when use_causal_conv is
            # True — confirm before relying on the causal path.
            conv = tf.keras.layers.Conv2D(
                filters=1,
                kernel_size=(kernel_size, freq_axis_kernel_size),
                padding="causal" if self.use_causal_conv is True else "same",
                use_bias=False,
            )  # ->> outputs: [B, T * scale, C * 1, 1]
            self.up_layers += [conv]

            # nonlinear
            if nonlinear_activation is not None:
                nonlinear = getattr(tf.keras.layers, nonlinear_activation)(
                    **nonlinear_activation_params
                )
                self.up_layers += [nonlinear]

    def call(self, c):
        """Calculate forward propagation.

        Args:
            c : Input tensor (B, T, C).

        Returns:
            Tensor: Upsampled tensor (B, T', C), where T' = T * prod(upsample_scales).
        """
        c = tf.expand_dims(c, -1)  # [B, T, C, 1]
        for f in self.up_layers:
            c = f(c)
        return tf.squeeze(c, -1)  # [B, T, C]
class TFConvInUpsampleNetWork(tf.keras.layers.Layer):
    """Convolution + upsampling network module."""

    def __init__(
        self,
        upsample_scales,
        nonlinear_activation=None,
        nonlinear_activation_params={},
        interpolate_mode="nearest",
        freq_axis_kernel_size=1,
        aux_channels=80,
        aux_context_window=0,
        use_causal_conv=False,
        initializer_seed=42,
        **kwargs,
    ):
        """Initialize convolution + upsampling network module.

        Args:
            upsample_scales (list): List of upsampling scales.
            nonlinear_activation (str): Activation function name.
            nonlinear_activation_params (dict): Arguments for specified activation function.
            interpolate_mode (str): Interpolation mode.
            freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
            aux_channels (int): Number of channels of pre-convolutional layer.
            aux_context_window (int): Context window size of the pre-convolutional layer.
            use_causal_conv (bool): Whether to use causal structure.
            initializer_seed (int): seed for the pre-conv kernel initializer.
        """
        super().__init__(**kwargs)
        self.aux_context_window = aux_context_window
        self.use_causal_conv = use_causal_conv and aux_context_window > 0

        # To capture wide-context information in conditional features:
        # causal mode only looks back aux_context_window frames, otherwise the
        # window is symmetric around the current frame.
        kernel_size = (
            aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
        )
        # NOTE(review): padding is "same" in both modes even though the kernel
        # size differs for the causal case — confirm this matches the intended
        # causal behavior.
        self.conv_in = TFConv1d(
            filters=aux_channels,
            kernel_size=kernel_size,
            padding="same",
            use_bias=False,
            initializer_seed=initializer_seed,
            name="conv_in",
        )
        self.upsample = TFUpsampleNetWork(
            output_channels=aux_channels,
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv,
            name="upsample_network",
        )

    def call(self, c):
        """Calculate forward propagation.

        Args:
            c : Input tensor (B, T', C).

        Returns:
            Tensor: Upsampled tensor (B, T, C),
                where T = (T' - aux_context_window * 2) * prod(upsample_scales).

        Note:
            The length of inputs considers the context window size.
        """
        c_ = self.conv_in(c)
        return self.upsample(c_)
class TFParallelWaveGANGenerator(BaseModel):
    """Parallel WaveGAN Generator module.

    A non-autoregressive WaveNet: random noise at audio rate is transformed
    into a waveform, conditioned on upsampled mel-spectrogram features.
    """

    def __init__(self, config, **kwargs):
        """Initialize generator.

        Args:
            config: ParallelWaveGANGeneratorConfig holding architecture
                hyper-parameters (channels, layers, stacks, upsampling, ...).
        """
        super().__init__(**kwargs)
        self.out_channels = config.out_channels
        self.aux_channels = config.aux_channels
        self.n_layers = config.n_layers
        self.stacks = config.stacks
        self.kernel_size = config.kernel_size
        self.upsample_params = config.upsample_params

        # check the number of layers and stacks
        assert self.n_layers % self.stacks == 0
        n_layers_per_stack = self.n_layers // self.stacks

        # define first convolution (projects 1-channel noise to residual_channels)
        self.first_conv = TFConv1d1x1(
            filters=config.residual_channels,
            use_bias=True,
            padding="same",
            initializer_seed=config.initializer_seed,
            name="first_convolution",
        )

        # define conv + upsampling network for the mel conditioning features
        if config.upsample_conditional_features:
            self.upsample_params.update({"use_causal_conv": config.use_causal_conv})
            self.upsample_params.update(
                {
                    "aux_channels": config.aux_channels,
                    "aux_context_window": config.aux_context_window,
                }
            )
            self.upsample_net = TFConvInUpsampleNetWork(**self.upsample_params)
        else:
            self.upsample_net = None

        # define residual blocks; dilation doubles within each stack
        self.conv_layers = []
        for layer in range(self.n_layers):
            dilation_rate = 2 ** (layer % n_layers_per_stack)
            conv = TFResidualBlock(
                kernel_size=config.kernel_size,
                residual_channels=config.residual_channels,
                gate_channels=config.gate_channels,
                skip_channels=config.skip_channels,
                aux_channels=config.aux_channels,
                dilation_rate=dilation_rate,
                dropout_rate=config.dropout_rate,
                use_bias=config.use_bias,
                use_causal_conv=config.use_causal_conv,
                initializer_seed=config.initializer_seed,
                name="residual_block_._{}".format(layer),
            )
            self.conv_layers += [conv]

        # define output layers mapping summed skips to the waveform
        self.last_conv_layers = [
            tf.keras.layers.ReLU(),
            TFConv1d1x1(
                filters=config.skip_channels,
                use_bias=config.use_bias,
                padding="same",
                initializer_seed=config.initializer_seed,
            ),
            tf.keras.layers.ReLU(),
            TFConv1d1x1(
                filters=config.out_channels,
                use_bias=True,
                padding="same",
                initializer_seed=config.initializer_seed,
            ),
            tf.keras.layers.Activation("tanh"),
        ]

    def _build(self):
        """Build all variables by running a dummy forward pass."""
        mels = tf.random.uniform(shape=[2, 20, 80], dtype=tf.float32)
        self(mels, training=tf.cast(True, tf.bool))

    def call(self, mels, training=False, **kwargs):
        """Calculate forward propagation.

        Args:
            mels (Tensor): Local conditioning auxiliary features (B, T', C).

        Returns:
            Tensor: Output tensor (B, T, 1)
        """
        # perform upsampling of the conditioning features
        if mels is not None and self.upsample_net is not None:
            c = self.upsample_net(mels)
        else:
            # BUGFIX: previously `c` was left undefined on this path and the
            # code below raised NameError; use the raw features directly when
            # no upsampling network is configured.
            c = mels

        # random noise x at one value per output sample
        # encode to hidden representation
        x = tf.expand_dims(tf.random.normal(shape=tf.shape(c)[0:2]), axis=2)
        x = self.first_conv(x)
        skips = 0
        for f in self.conv_layers:
            x, h = f(x, c, training=training)
            skips += h
        # normalize the skip sum so its scale is independent of depth
        skips *= tf.math.sqrt(1.0 / len(self.conv_layers))

        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)

        return x

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels"),
        ],
    )
    def inference(self, mels):
        """Calculate forward propagation (graph-compiled inference path).

        Args:
            mels (Tensor): Local conditioning auxiliary features (B, T', C).

        Returns:
            Tensor: Output tensor (B, T, 1)
        """
        # perform upsampling of the conditioning features
        if mels is not None and self.upsample_net is not None:
            c = self.upsample_net(mels)
        else:
            # BUGFIX: same undefined-`c` fallback as in call().
            c = mels

        # encode to hidden representation
        x = tf.expand_dims(tf.random.normal(shape=tf.shape(c)[0:2]), axis=2)
        x = self.first_conv(x)
        skips = 0
        for f in self.conv_layers:
            x, h = f(x, c, training=False)
            skips += h
        skips *= tf.math.sqrt(1.0 / len(self.conv_layers))

        # apply final layers
        x = skips
        for f in self.last_conv_layers:
            x = f(x)

        return x
class TFParallelWaveGANDiscriminator(BaseModel):
    """Parallel WaveGAN Discriminator module.

    A stack of dilated 1D convolutions with interleaved activations that maps
    a waveform (B, T, 1) to per-sample real/fake scores (B, T, out_channels).
    """

    def __init__(self, config, **kwargs):
        """Initialize discriminator from ``config``
        (ParallelWaveGANDiscriminatorConfig)."""
        super().__init__(**kwargs)
        assert (config.kernel_size - 1) % 2 == 0, "Not support even number kernel size."
        assert config.dilation_factor > 0, "Dilation factor must be > 0."
        self.conv_layers = []
        # n_layers - 1 dilated conv + activation pairs; dilation grows with
        # depth (linearly when dilation_factor == 1, else exponentially).
        for i in range(config.n_layers - 1):
            if i == 0:
                dilation_rate = 1
            else:
                dilation_rate = (
                    i if config.dilation_factor == 1 else config.dilation_factor ** i
                )
            self.conv_layers += [
                TFConv1d(
                    filters=config.conv_channels,
                    kernel_size=config.kernel_size,
                    padding="same",
                    dilation_rate=dilation_rate,
                    use_bias=config.use_bias,
                    initializer_seed=config.initializer_seed,
                )
            ]
            self.conv_layers += [
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                )
            ]
        # final projection to out_channels scores (no activation unless
        # apply_sigmoid_at_last is set)
        self.conv_layers += [
            TFConv1d(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                padding="same",
                use_bias=config.use_bias,
                initializer_seed=config.initializer_seed,
            )
        ]
        if config.apply_sigmoid_at_last:
            self.conv_layers += [
                tf.keras.layers.Activation("sigmoid"),
            ]

    def _build(self):
        # Build all variables by running a dummy one-second 16 kHz waveform.
        x = tf.random.uniform(shape=[2, 16000, 1])
        self(x)

    def call(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, T, 1).

        Returns:
            Tensor: Output tensor (B, T, 1)
        """
        for f in self.conv_layers:
            x = f(x)
        return x
| 18,663 | 32.508079 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The MelGAN Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MelGAN Modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
from tensorflow_tts.utils import GroupConv1D, WeightNormalization
def get_initializer(initializer_seed=42):
    """Build a seeded GlorotNormal initializer.
    Args:
        initializer_seed: int, seed used for reproducible initialization.
    Returns:
        A `tf.keras.initializers.GlorotNormal` instance seeded with
        `initializer_seed`.
    """
    initializer = tf.keras.initializers.GlorotNormal(seed=initializer_seed)
    return initializer
class TFReflectionPad1d(tf.keras.layers.Layer):
    """Tensorflow ReflectionPad1d module."""
    def __init__(self, padding_size, padding_type="REFLECT", **kwargs):
        """Initialize TFReflectionPad1d module.
        Args:
            padding_size (int): Amount of padding added on each side of the time axis.
            padding_type (str): "CONSTANT", "REFLECT", or "SYMMETRIC". Default is "REFLECT".
        """
        super().__init__(**kwargs)
        self.padding_size = padding_size
        self.padding_type = padding_type
    def call(self, x):
        """Pad the time axis of the input.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Padded tensor (B, T + 2 * padding_size, C).
        """
        # Only the middle (time) axis is padded; batch and channel axes are untouched.
        paddings = [[0, 0], [self.padding_size, self.padding_size], [0, 0]]
        return tf.pad(x, paddings, self.padding_type)
class TFConvTranspose1d(tf.keras.layers.Layer):
    """Tensorflow ConvTranspose1d module (implemented via Conv2DTranspose)."""
    def __init__(
        self,
        filters,
        kernel_size,
        strides,
        padding,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFConvTranspose1d( module.
        Args:
            filters (int): Number of filters.
            kernel_size (int): kernel size.
            strides (int): Stride width.
            padding (str): Padding type ("same" or "valid").
                NOTE(review): this argument is currently ignored — the inner
                Conv2DTranspose below hardcodes padding="same". Confirm intent
                before passing "valid".
            is_weight_norm (bool): Whether to wrap the conv in WeightNormalization.
            initializer_seed (int): Seed for the kernel initializer.
        """
        super().__init__(**kwargs)
        # 1D transposed conv is emulated with a 2D transposed conv over a
        # dummy spatial axis of size 1 (added/removed in call()).
        self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(
            filters=filters,
            kernel_size=(kernel_size, 1),
            strides=(strides, 1),
            padding="same",
            kernel_initializer=get_initializer(initializer_seed),
        )
        if is_weight_norm:
            self.conv1d_transpose = WeightNormalization(self.conv1d_transpose)
    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T', C') with time upsampled by `strides`.
        """
        x = tf.expand_dims(x, 2)  # (B, T, 1, C): insert dummy spatial axis
        x = self.conv1d_transpose(x)
        x = tf.squeeze(x, 2)  # back to (B, T', C')
        return x
class TFResidualStack(tf.keras.layers.Layer):
    """Tensorflow ResidualStack module."""
    def __init__(
        self,
        kernel_size,
        filters,
        dilation_rate,
        use_bias,
        nonlinear_activation,
        nonlinear_activation_params,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFResidualStack module.
        Args:
            kernel_size (int): Kernel size.
            filters (int): Number of filters.
            dilation_rate (int): Dilation rate.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            is_weight_norm (bool): Whether to wrap conv layers in WeightNormalization.
            initializer_seed (int): Seed for the kernel initializers.
        """
        super().__init__(**kwargs)
        # Main branch: activation -> reflect-pad -> dilated conv -> activation -> 1x1 conv.
        self.blocks = [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            TFReflectionPad1d((kernel_size - 1) // 2 * dilation_rate),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=kernel_size,
                dilation_rate=dilation_rate,
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=1,
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
        ]
        # Shortcut branch: 1x1 conv so the residual sum's channel counts match.
        self.shortcut = tf.keras.layers.Conv1D(
            filters=filters,
            kernel_size=1,
            use_bias=use_bias,
            kernel_initializer=get_initializer(initializer_seed),
            name="shortcut",
        )
        # apply weightnorm
        if is_weight_norm:
            self._apply_weightnorm(self.blocks)
            self.shortcut = WeightNormalization(self.shortcut)
    def call(self, x):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input tensor (B, T, C).
        Returns:
            Tensor: Output tensor (B, T, C).
        """
        _x = tf.identity(x)
        for layer in self.blocks:
            _x = layer(_x)
        shortcut = self.shortcut(x)
        # residual connection: main branch + shortcut branch
        return shortcut + _x
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        # Wraps conv/dense layers in-place; other layers (activations, padding)
        # fail the name check or raise and are intentionally left untouched.
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFMelGANGenerator(BaseModel):
    """Tensorflow MelGAN generator module."""
    def __init__(self, config, **kwargs):
        """Initialize TFMelGANGenerator module.
        Args:
            config: config object of Melgan generator.
        """
        super().__init__(**kwargs)
        # check hyper parameter is valid or not
        assert config.filters >= np.prod(config.upsample_scales)
        assert config.filters % (2 ** len(config.upsample_scales)) == 0
        # add initial layer
        layers = []
        layers += [
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="first_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.filters,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                kernel_initializer=get_initializer(config.initializer_seed),
            ),
        ]
        for i, upsample_scale in enumerate(config.upsample_scales):
            # add upsampling layer
            # each stage halves the channel count while upsampling time by
            # `upsample_scale`.
            layers += [
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                ),
                TFConvTranspose1d(
                    filters=config.filters // (2 ** (i + 1)),
                    kernel_size=upsample_scale * 2,
                    strides=upsample_scale,
                    padding="same",
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="conv_transpose_._{}".format(i),
                ),
            ]
            # add residual stack layer
            for j in range(config.stacks):
                layers += [
                    TFResidualStack(
                        kernel_size=config.stack_kernel_size,
                        filters=config.filters // (2 ** (i + 1)),
                        dilation_rate=config.stack_kernel_size ** j,
                        use_bias=config.use_bias,
                        nonlinear_activation=config.nonlinear_activation,
                        nonlinear_activation_params=config.nonlinear_activation_params,
                        is_weight_norm=config.is_weight_norm,
                        initializer_seed=config.initializer_seed,
                        name="residual_stack_._{}._._{}".format(i, j),
                    )
                ]
        # add final layer
        layers += [
            getattr(tf.keras.layers, config.nonlinear_activation)(
                **config.nonlinear_activation_params
            ),
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="last_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                kernel_initializer=get_initializer(config.initializer_seed),
                dtype=tf.float32,
            ),
        ]
        if config.use_final_nolinear_activation:
            layers += [tf.keras.layers.Activation("tanh", dtype=tf.float32)]
        if config.is_weight_norm is True:
            self._apply_weightnorm(layers)
        self.melgan = tf.keras.models.Sequential(layers)
    def call(self, mels, **kwargs):
        """Calculate forward propagation.
        Args:
            mels (Tensor): Input tensor (B, T, channels)
        Returns:
            Tensor: Output tensor (B, T * prod(upsample_scales), out_channels)
        """
        return self.inference(mels)
    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        # Variable batch size / length; fixed 80 mel bins.
        return self.melgan(mels)
    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        # TFLite conversion requires a fixed batch size of 1.
        return self.melgan(mels)
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        # Wraps conv/dense layers in-place; other layers are skipped silently.
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
    def _build(self):
        """Build model by passing fake input."""
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        self(fake_mels)
class TFMelGANDiscriminator(tf.keras.layers.Layer):
    """Tensorflow MelGAN single-scale discriminator module."""
    def __init__(
        self,
        out_channels=1,
        kernel_sizes=[5, 3],
        filters=16,
        max_downsample_filters=1024,
        use_bias=True,
        downsample_scales=[4, 4, 4, 4],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        padding_type="REFLECT",
        is_weight_norm=True,
        initializer_seed=0.02,
        **kwargs
    ):
        """Initilize MelGAN discriminator module.
        Args:
            out_channels (int): Number of output channels.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
                and the first and the second kernel sizes will be used for the last two layers.
                For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15.
                the last two layers' kernel size will be 5 and 3, respectively.
            filters (int): Initial number of filters for conv layer.
            max_downsample_filters (int): Maximum number of filters for downsampling layers.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            downsample_scales (list): List of downsampling scales.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            padding_type (str): Padding type (support only "REFLECT", "CONSTANT", "SYMMETRIC")
        """
        super().__init__(**kwargs)
        discriminator = []
        # check kernel_size is valid
        assert len(kernel_sizes) == 2
        assert kernel_sizes[0] % 2 == 1
        assert kernel_sizes[1] % 2 == 1
        # add first layer
        discriminator = [
            TFReflectionPad1d(
                (np.prod(kernel_sizes) - 1) // 2, padding_type=padding_type
            ),
            tf.keras.layers.Conv1D(
                filters=filters,
                kernel_size=int(np.prod(kernel_sizes)),
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            ),
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            ),
        ]
        # add downsample layers
        in_chs = filters
        with tf.keras.utils.CustomObjectScope({"GroupConv1D": GroupConv1D}):
            for downsample_scale in downsample_scales:
                # Channels grow with each downsampling stage, capped at
                # max_downsample_filters.
                out_chs = min(in_chs * downsample_scale, max_downsample_filters)
                discriminator += [
                    GroupConv1D(
                        filters=out_chs,
                        kernel_size=downsample_scale * 10 + 1,
                        strides=downsample_scale,
                        padding="same",
                        use_bias=use_bias,
                        groups=in_chs // 4,
                        kernel_initializer=get_initializer(initializer_seed),
                    )
                ]
                discriminator += [
                    getattr(tf.keras.layers, nonlinear_activation)(
                        **nonlinear_activation_params
                    )
                ]
                in_chs = out_chs
        # add final layers
        out_chs = min(in_chs * 2, max_downsample_filters)
        discriminator += [
            tf.keras.layers.Conv1D(
                filters=out_chs,
                kernel_size=kernel_sizes[0],
                padding="same",
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            )
        ]
        discriminator += [
            getattr(tf.keras.layers, nonlinear_activation)(
                **nonlinear_activation_params
            )
        ]
        discriminator += [
            tf.keras.layers.Conv1D(
                filters=out_channels,
                kernel_size=kernel_sizes[1],
                padding="same",
                use_bias=use_bias,
                kernel_initializer=get_initializer(initializer_seed),
            )
        ]
        if is_weight_norm is True:
            self._apply_weightnorm(discriminator)
        # NOTE: attribute name "disciminator" is a typo kept as-is; external
        # code (e.g. checkpoints) may depend on it.
        self.disciminator = discriminator
    def call(self, x, **kwargs):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of output tensors of each layer.
        """
        # Collect every intermediate activation for feature-matching losses.
        outs = []
        for f in self.disciminator:
            x = f(x)
            outs += [x]
        return outs
    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        # Wraps conv/dense layers in-place; other layers are skipped silently.
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFMelGANMultiScaleDiscriminator(BaseModel):
    """MelGAN multi-scale discriminator module."""
    def __init__(self, config, **kwargs):
        """Initilize MelGAN multi-scale discriminator module.
        Args:
            config: config object for melgan discriminator
        """
        super().__init__(**kwargs)
        self.discriminator = []
        # add discriminator
        # One TFMelGANDiscriminator per scale; all share the same hyperparameters.
        for i in range(config.scales):
            self.discriminator += [
                TFMelGANDiscriminator(
                    out_channels=config.out_channels,
                    kernel_sizes=config.kernel_sizes,
                    filters=config.filters,
                    max_downsample_filters=config.max_downsample_filters,
                    use_bias=config.use_bias,
                    downsample_scales=config.downsample_scales,
                    nonlinear_activation=config.nonlinear_activation,
                    nonlinear_activation_params=config.nonlinear_activation_params,
                    padding_type=config.padding_type,
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="melgan_discriminator_scale_._{}".format(i),
                )
            ]
        # Pooling layer applied between scales to downsample the waveform.
        self.pooling = getattr(tf.keras.layers, config.downsample_pooling)(
            **config.downsample_pooling_params
        )
    def call(self, x, **kwargs):
        """Calculate forward propagation.
        Args:
            x (Tensor): Input noise signal (B, T, 1).
        Returns:
            List: List of list of each discriminator outputs, which consists of each layer output tensors.
        """
        outs = []
        for f in self.discriminator:
            outs += [f(x)]
            # downsample before feeding the next (coarser) scale
            x = self.pooling(x)
        return outs
| 17,807 | 34.687375 | 106 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 The Tacotron-2 Authors, Minh Nguyen (@dathudeptrai), Eren Gölge (@erogol) and Jae Yoo (@jaeyoo)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Modules."""
import collections
import numpy as np
import tensorflow as tf
# TODO: once https://github.com/tensorflow/addons/pull/1964 is fixed,
# uncomment this line.
# from tensorflow_addons.seq2seq import dynamic_decode
from tensorflow_addons.seq2seq import BahdanauAttention, Decoder, Sampler
from tensorflow_tts.utils import dynamic_decode
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_range=0.02):
    """Make a truncated-normal initializer.
    Args:
        initializer_range: float, stddev of the truncated normal distribution.
    Returns:
        A `tf.keras.initializers.TruncatedNormal` with stddev = `initializer_range`.
    """
    initializer = tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
    return initializer
def gelu(x):
    """Gaussian Error Linear Unit (exact erf-based form)."""
    cumulative = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return cumulative * x
def gelu_new(x):
    """Smoother (tanh-approximated) Gaussian Error Linear Unit."""
    inner = np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))
    cdf_approx = 0.5 * (1.0 + tf.tanh(inner))
    return cdf_approx * x
def swish(x):
    """Swish activation; delegates to the built-in `tf.nn.swish`."""
    return tf.nn.swish(x)
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    softplus_x = tf.math.softplus(x)
    return x * tf.math.tanh(softplus_x)
# Map activation-name strings (as used in model configs) to callable
# Keras activation objects / functions.
ACT2FN = {
    "identity": tf.keras.layers.Activation("linear"),
    "tanh": tf.keras.layers.Activation("tanh"),
    "gelu": tf.keras.layers.Activation(gelu),
    "relu": tf.keras.activations.relu,
    "swish": tf.keras.layers.Activation(swish),
    "gelu_new": tf.keras.layers.Activation(gelu_new),
    "mish": tf.keras.layers.Activation(mish),
}
class TFEmbedding(tf.keras.layers.Embedding):
    """Faster version of embedding."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    def call(self, inputs):
        """Look up embeddings via gather_nd instead of the base implementation."""
        indices = tf.expand_dims(inputs, -1)
        indices = tf.cast(indices, tf.int32)
        return tf.gather_nd(self.embeddings, indices)
class TFTacotronConvBatchNorm(tf.keras.layers.Layer):
    """Tacotron-2 Convolutional Batchnorm module: conv -> batchnorm -> activation -> dropout."""
    def __init__(
        self, filters, kernel_size, dropout_rate, activation=None, name_idx=None
    ):
        """Init variables.
        Args:
            filters (int): Number of conv filters.
            kernel_size (int): Conv kernel size.
            dropout_rate (float): Dropout rate after activation.
            activation (str): Key into ACT2FN (e.g. "tanh", "relu", "identity").
            name_idx: Index suffix used in sub-layer names.
        """
        super().__init__()
        self.conv1d = tf.keras.layers.Conv1D(
            filters,
            kernel_size,
            kernel_initializer=get_initializer(0.02),
            padding="same",
            name="conv_._{}".format(name_idx),
        )
        # SyncBatchNormalization synchronizes batch statistics across replicas.
        self.norm = tf.keras.layers.experimental.SyncBatchNormalization(
            axis=-1, name="batch_norm_._{}".format(name_idx)
        )
        self.dropout = tf.keras.layers.Dropout(
            rate=dropout_rate, name="dropout_._{}".format(name_idx)
        )
        self.act = ACT2FN[activation]
    def call(self, inputs, training=False):
        """Apply conv, batchnorm, activation and dropout to `inputs` (B, T, C)."""
        outputs = self.conv1d(inputs)
        outputs = self.norm(outputs, training=training)
        outputs = self.act(outputs)
        outputs = self.dropout(outputs, training=training)
        return outputs
class TFTacotronEmbeddings(tf.keras.layers.Layer):
    """Construct character/phoneme/positional/speaker embeddings."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.embedding_hidden_size = config.embedding_hidden_size
        self.initializer_range = config.initializer_range
        self.config = config
        # Speaker embedding path is created only for multi-speaker models.
        if config.n_speakers > 1:
            self.speaker_embeddings = TFEmbedding(
                config.n_speakers,
                config.embedding_hidden_size,
                embeddings_initializer=get_initializer(self.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.embedding_hidden_size, name="speaker_fc"
            )
        self.LayerNorm = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="LayerNorm"
        )
        self.dropout = tf.keras.layers.Dropout(config.embedding_dropout_prob)
    def build(self, input_shape):
        """Build shared character/phoneme embedding layers."""
        # Created in build() (not __init__) so the weight lives under a
        # dedicated name scope.
        with tf.name_scope("character_embeddings"):
            self.character_embeddings = self.add_weight(
                "weight",
                shape=[self.vocab_size, self.embedding_hidden_size],
                initializer=get_initializer(self.initializer_range),
            )
        super().build(input_shape)
    def call(self, inputs, training=False):
        """Get character embeddings of inputs.
        Args:
            1. character, Tensor (int32) shape [batch_size, length].
            2. speaker_id, Tensor (int32) shape [batch_size]
        Returns:
            Tensor (float32) shape [batch_size, length, embedding_size].
        """
        return self._embedding(inputs, training=training)
    def _embedding(self, inputs, training=False):
        """Applies embedding based on inputs tensor."""
        input_ids, speaker_ids = inputs
        # create embeddings
        inputs_embeds = tf.gather(self.character_embeddings, input_ids)
        embeddings = inputs_embeds
        if self.config.n_speakers > 1:
            speaker_embeddings = self.speaker_embeddings(speaker_ids)
            # softplus keeps speaker features positive before the additive mix.
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time axis
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            # sum all embedding
            embeddings += extended_speaker_features
        # apply layer-norm and dropout for embeddings.
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings, training=training)
        return embeddings
class TFTacotronEncoderConvs(tf.keras.layers.Layer):
    """Tacotron-2 Encoder Convolutional Batchnorm module."""
    def __init__(self, config, **kwargs):
        """Create `config.n_conv_encoder` conv+batchnorm sub-layers."""
        super().__init__(**kwargs)
        self.conv_batch_norm = [
            TFTacotronConvBatchNorm(
                filters=config.encoder_conv_filters,
                kernel_size=config.encoder_conv_kernel_sizes,
                activation=config.encoder_conv_activation,
                dropout_rate=config.encoder_conv_dropout_rate,
                name_idx=idx,
            )
            for idx in range(config.n_conv_encoder)
        ]
    def call(self, inputs, training=False):
        """Apply the conv stack sequentially to `inputs`."""
        hidden = inputs
        for conv_bn in self.conv_batch_norm:
            hidden = conv_bn(hidden, training=training)
        return hidden
class TFTacotronEncoder(tf.keras.layers.Layer):
    """Tacotron-2 Encoder: embeddings -> conv stack -> bidirectional LSTM."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.embeddings = TFTacotronEmbeddings(config, name="embeddings")
        self.convbn = TFTacotronEncoderConvs(config, name="conv_batch_norm")
        self.bilstm = tf.keras.layers.Bidirectional(
            tf.keras.layers.LSTM(
                units=config.encoder_lstm_units, return_sequences=True
            ),
            name="bilstm",
        )
        # Multi-speaker models add a second speaker conditioning path on the
        # encoder outputs (in addition to the one inside the embeddings).
        if config.n_speakers > 1:
            self.encoder_speaker_embeddings = TFEmbedding(
                config.n_speakers,
                config.embedding_hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="encoder_speaker_embeddings",
            )
            self.encoder_speaker_fc = tf.keras.layers.Dense(
                units=config.encoder_lstm_units * 2, name="encoder_speaker_fc"
            )
        self.config = config
    def call(self, inputs, training=False):
        """Encode `input_ids` into a sequence of hidden states.
        Args:
            inputs: tuple of (input_ids, speaker_ids, input_mask).
        Returns:
            Tensor (B, T, encoder_lstm_units * 2).
        """
        input_ids, speaker_ids, input_mask = inputs
        # create embedding and mask them since we sum
        # speaker embedding to all character embedding.
        input_embeddings = self.embeddings([input_ids, speaker_ids], training=training)
        # pass embeddings to convolution batch norm
        conv_outputs = self.convbn(input_embeddings, training=training)
        # bi-lstm.
        outputs = self.bilstm(conv_outputs, mask=input_mask)
        if self.config.n_speakers > 1:
            encoder_speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
            encoder_speaker_features = tf.math.softplus(
                self.encoder_speaker_fc(encoder_speaker_embeddings)
            )
            # extended encoderspeaker embeddings: broadcast over time axis
            extended_encoder_speaker_features = encoder_speaker_features[
                :, tf.newaxis, :
            ]
            # sum to encoder outputs
            outputs += extended_encoder_speaker_features
        return outputs
class Tacotron2Sampler(Sampler):
    """Tacotron2 sampler for Seq2Seq training."""
    def __init__(
        self, config,
    ):
        super().__init__()
        self.config = config
        # create schedule factor.
        # the input of a next decoder cell is calculated by formular:
        # next_inputs = ratio * prev_groundtruth_outputs + (1.0 - ratio) * prev_predicted_outputs.
        # ratio == 1.0 means full teacher forcing during training.
        self._ratio = tf.constant(1.0, dtype=tf.float32)
        self._reduction_factor = self.config.reduction_factor
    def setup_target(self, targets, mel_lengths):
        """Setup ground-truth mel outputs for decoder."""
        self.mel_lengths = mel_lengths
        self.set_batch_size(tf.shape(targets)[0])
        # Keep only the last frame of each reduction group as decoder input.
        self.targets = targets[
            :, self._reduction_factor - 1 :: self._reduction_factor, :
        ]
        self.max_lengths = tf.tile([tf.shape(self.targets)[1]], [self._batch_size])
    @property
    def batch_size(self):
        return self._batch_size
    @property
    def sample_ids_shape(self):
        return tf.TensorShape([])
    @property
    def sample_ids_dtype(self):
        return tf.int32
    @property
    def reduction_factor(self):
        return self._reduction_factor
    def initialize(self):
        """Return (Finished, next_inputs)."""
        # Decoding starts from an all-zero "go" frame.
        return (
            tf.tile([False], [self._batch_size]),
            tf.tile([[0.0]], [self._batch_size, self.config.n_mels]),
        )
    def sample(self, time, outputs, state):
        # Sample ids are unused by Tacotron-2; return zeros.
        return tf.tile([0], [self._batch_size])
    def next_inputs(
        self,
        time,
        outputs,
        state,
        sample_ids,
        stop_token_prediction,
        training=False,
        **kwargs,
    ):
        """Compute (finished, next decoder input, next state) for one step."""
        if training:
            finished = time + 1 >= self.max_lengths
            # Scheduled-sampling mix of ground truth and prediction.
            next_inputs = (
                self._ratio * self.targets[:, time, :]
                + (1.0 - self._ratio) * outputs[:, -self.config.n_mels :]
            )
            next_state = state
            return (finished, next_inputs, next_state)
        else:
            # Inference: stop when all stop-token sigmoids round to 1.
            stop_token_prediction = tf.nn.sigmoid(stop_token_prediction)
            finished = tf.cast(tf.round(stop_token_prediction), tf.bool)
            finished = tf.reduce_all(finished)
            next_inputs = outputs[:, -self.config.n_mels :]
            next_state = state
            return (finished, next_inputs, next_state)
    def set_batch_size(self, batch_size):
        self._batch_size = batch_size
class TFTacotronLocationSensitiveAttention(BahdanauAttention):
    """Tacotron-2 Location Sensitive Attention module."""
    def __init__(
        self,
        config,
        memory,
        mask_encoder=True,
        memory_sequence_length=None,
        is_cumulate=True,
    ):
        """Init variables.
        Args:
            config: Tacotron-2 config (attention_dim, attention_filters, attention_kernel, ...).
            memory: Encoder outputs to attend over (may be None and set later).
            mask_encoder (bool): Whether to mask memory by its sequence length.
            memory_sequence_length: Lengths used for masking when mask_encoder is True.
            is_cumulate (bool): Whether attention state accumulates alignments over steps.
        """
        memory_length = memory_sequence_length if (mask_encoder is True) else None
        super().__init__(
            units=config.attention_dim,
            memory=memory,
            memory_sequence_length=memory_length,
            probability_fn="softmax",
            name="LocationSensitiveAttention",
        )
        # Location features: conv over previous alignments, then projection.
        self.location_convolution = tf.keras.layers.Conv1D(
            filters=config.attention_filters,
            kernel_size=config.attention_kernel,
            padding="same",
            use_bias=False,
            name="location_conv",
        )
        self.location_layer = tf.keras.layers.Dense(
            units=config.attention_dim, use_bias=False, name="location_layer"
        )
        self.v = tf.keras.layers.Dense(1, use_bias=True, name="scores_attention")
        self.config = config
        self.is_cumulate = is_cumulate
        self.use_window = False
    def setup_window(self, win_front=2, win_back=4):
        """Enable inference-time window masking around the previous argmax alignment."""
        self.win_front = tf.constant(win_front, tf.int32)
        self.win_back = tf.constant(win_back, tf.int32)
        self._indices = tf.expand_dims(tf.range(tf.shape(self.keys)[1]), 0)
        self._indices = tf.tile(
            self._indices, [tf.shape(self.keys)[0], 1]
        )  # [batch_size, max_time]
        self.use_window = True
    def _compute_window_mask(self, max_alignments):
        """Compute window mask for inference.
        Args:
            max_alignments (int): [batch_size]
        """
        expanded_max_alignments = tf.expand_dims(max_alignments, 1)  # [batch_size, 1]
        low = expanded_max_alignments - self.win_front
        high = expanded_max_alignments + self.win_back
        # 1.0 outside [low, high], 0.0 inside the window.
        mlow = tf.cast((self._indices < low), tf.float32)
        mhigh = tf.cast((self._indices > high), tf.float32)
        mask = mlow + mhigh
        return mask  # [batch_size, max_length]
    def __call__(self, inputs, training=False):
        """Compute one attention step.
        Args:
            inputs: tuple (query, state, prev_max_alignments).
        Returns:
            Tuple of (context, alignments, new state).
        """
        query, state, prev_max_alignments = inputs
        processed_query = self.query_layer(query) if self.query_layer else query
        processed_query = tf.expand_dims(processed_query, 1)
        expanded_alignments = tf.expand_dims(state, axis=2)
        f = self.location_convolution(expanded_alignments)
        processed_location_features = self.location_layer(f)
        energy = self._location_sensitive_score(
            processed_query, processed_location_features, self.keys
        )
        # mask energy on inference steps.
        if self.use_window is True:
            window_mask = self._compute_window_mask(prev_max_alignments)
            # large negative value zeros out masked positions after softmax
            energy = energy + window_mask * -1e20
        alignments = self.probability_fn(energy, state)
        if self.is_cumulate:
            state = alignments + state
        else:
            state = alignments
        expanded_alignments = tf.expand_dims(alignments, 2)
        context = tf.reduce_sum(expanded_alignments * self.values, 1)
        return context, alignments, state
    def _location_sensitive_score(self, W_query, W_fil, W_keys):
        """Calculate location sensitive energy."""
        return tf.squeeze(self.v(tf.nn.tanh(W_keys + W_query + W_fil)), -1)
    def get_initial_state(self, batch_size, size):
        """Get initial alignments."""
        return tf.zeros(shape=[batch_size, size], dtype=tf.float32)
    def get_initial_context(self, batch_size):
        """Get initial attention."""
        return tf.zeros(
            shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32
        )
class TFTacotronPrenet(tf.keras.layers.Layer):
    """Tacotron-2 prenet: a stack of dense layers with always-on dropout."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.prenet_dense = [
            tf.keras.layers.Dense(
                units=config.prenet_units,
                activation=ACT2FN[config.prenet_activation],
                name="dense_._{}".format(i),
            )
            for i in range(config.n_prenet_layers)
        ]
        self.dropout = tf.keras.layers.Dropout(
            rate=config.prenet_dropout_rate, name="dropout"
        )
    def call(self, inputs, training=False):
        """Apply dense+dropout stack to `inputs`."""
        outputs = inputs
        for layer in self.prenet_dense:
            outputs = layer(outputs)
            # NOTE: dropout is forced on (training=True) even at inference —
            # appears intentional (Tacotron-2 keeps prenet dropout active
            # during synthesis); confirm before "fixing".
            outputs = self.dropout(outputs, training=True)
        return outputs
class TFTacotronPostnet(tf.keras.layers.Layer):
    """Tacotron-2 postnet: a stack of conv+batchnorm layers producing a
    residual refinement of the decoder's mel output."""
    def __init__(self, config, **kwargs):
        """Init variables.
        Args:
            config: Tacotron-2 config providing `n_conv_postnet`,
                `postnet_conv_filters`, `postnet_conv_kernel_sizes` and
                `postnet_dropout_rate`.
        """
        super().__init__(**kwargs)
        self.conv_batch_norm = []
        for i in range(config.n_conv_postnet):
            conv = TFTacotronConvBatchNorm(
                filters=config.postnet_conv_filters,
                kernel_size=config.postnet_conv_kernel_sizes,
                dropout_rate=config.postnet_dropout_rate,
                # tanh on every layer except the final (linear) one.
                activation="identity" if i + 1 == config.n_conv_postnet else "tanh",
                name_idx=i,
            )
            self.conv_batch_norm.append(conv)
    def call(self, inputs, training=False):
        """Apply the conv stack sequentially to `inputs` (B, T, C)."""
        outputs = inputs
        # idiom fix: the original used enumerate() and discarded the index.
        for conv in self.conv_batch_norm:
            outputs = conv(outputs, training=training)
        return outputs
# Recurrent state carried between decoder steps: LSTM states, attention
# context, step counter, (cumulative) alignments, alignment history and the
# previous argmax alignment positions (used for window masking).
TFTacotronDecoderCellState = collections.namedtuple(
    "TFTacotronDecoderCellState",
    [
        "attention_lstm_state",
        "decoder_lstms_state",
        "context",
        "time",
        "state",
        "alignment_history",
        "max_alignments",
    ],
)
# Per-step decoder output: predicted mel frames, stop-token logits, sample id.
TFDecoderOutput = collections.namedtuple(
    "TFDecoderOutput", ("mel_output", "token_output", "sample_id")
)
class TFTacotronDecoderCell(tf.keras.layers.AbstractRNNCell):
    """Tacotron-2 custom decoder cell: prenet -> attention LSTM -> LSA attention -> decoder LSTMs -> projections."""
    def __init__(self, config, enable_tflite_convertible=False, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        # TFLite mode disables TensorArray-based alignment history.
        self.enable_tflite_convertible = enable_tflite_convertible
        self.prenet = TFTacotronPrenet(config, name="prenet")
        # define lstm cell on decoder.
        # TODO(@dathudeptrai) switch to zone-out lstm.
        self.attention_lstm = tf.keras.layers.LSTMCell(
            units=config.decoder_lstm_units, name="attention_lstm_cell"
        )
        lstm_cells = []
        for i in range(config.n_lstm_decoder):
            lstm_cell = tf.keras.layers.LSTMCell(
                units=config.decoder_lstm_units, name="lstm_cell_._{}".format(i)
            )
            lstm_cells.append(lstm_cell)
        self.decoder_lstms = tf.keras.layers.StackedRNNCells(
            lstm_cells, name="decoder_lstms"
        )
        # define attention layer.
        if config.attention_type == "lsa":
            # create location-sensitive attention.
            # memory is attached later (before decoding starts).
            self.attention_layer = TFTacotronLocationSensitiveAttention(
                config,
                memory=None,
                mask_encoder=True,
                memory_sequence_length=None,
                is_cumulate=True,
            )
        else:
            raise ValueError("Only lsa (location-sensitive attention) is supported")
        # frame, stop projection layer.
        # frame projection emits `reduction_factor` mel frames per step.
        self.frame_projection = tf.keras.layers.Dense(
            units=config.n_mels * config.reduction_factor, name="frame_projection"
        )
        self.stop_projection = tf.keras.layers.Dense(
            units=config.reduction_factor, name="stop_projection"
        )
        self.config = config
    def set_alignment_size(self, alignment_size):
        # Must be called with the encoder's max time before get_initial_state.
        self.alignment_size = alignment_size
    @property
    def output_size(self):
        """Return output (mel) size."""
        return self.frame_projection.units
    @property
    def state_size(self):
        """Return hidden state size.
        NOTE(review): the kwargs here do not match the namedtuple's fields
        (`attention` is not a field and `context` is missing), so evaluating
        this property would raise a TypeError. It appears to be unused in
        practice — confirm before relying on it.
        """
        return TFTacotronDecoderCellState(
            attention_lstm_state=self.attention_lstm.state_size,
            decoder_lstms_state=self.decoder_lstms.state_size,
            time=tf.TensorShape([]),
            attention=self.config.attention_dim,
            state=self.alignment_size,
            alignment_history=(),
            max_alignments=tf.TensorShape([1]),
        )
    def get_initial_state(self, batch_size):
        """Get initial states."""
        initial_attention_lstm_cell_states = self.attention_lstm.get_initial_state(
            None, batch_size, dtype=tf.float32
        )
        initial_decoder_lstms_cell_states = self.decoder_lstms.get_initial_state(
            None, batch_size, dtype=tf.float32
        )
        initial_context = tf.zeros(
            shape=[batch_size, self.config.encoder_lstm_units * 2], dtype=tf.float32
        )
        initial_state = self.attention_layer.get_initial_state(
            batch_size, size=self.alignment_size
        )
        # TensorArray is not TFLite-convertible, so history is dropped there.
        if self.enable_tflite_convertible:
            initial_alignment_history = ()
        else:
            initial_alignment_history = tf.TensorArray(
                dtype=tf.float32, size=0, dynamic_size=True
            )
        return TFTacotronDecoderCellState(
            attention_lstm_state=initial_attention_lstm_cell_states,
            decoder_lstms_state=initial_decoder_lstms_cell_states,
            time=tf.zeros([], dtype=tf.int32),
            context=initial_context,
            state=initial_state,
            alignment_history=initial_alignment_history,
            max_alignments=tf.zeros([batch_size], dtype=tf.int32),
        )
    def call(self, inputs, states, training=False):
        """Run one decoder step; returns ((mel frames, stop tokens), new states)."""
        decoder_input = inputs
        # 1. apply prenet for decoder_input.
        prenet_out = self.prenet(decoder_input, training=training)  # [batch_size, dim]
        # 2. concat prenet_out and prev context vector
        # then use it as input of attention lstm layer.
        attention_lstm_input = tf.concat([prenet_out, states.context], axis=-1)
        attention_lstm_output, next_attention_lstm_state = self.attention_lstm(
            attention_lstm_input, states.attention_lstm_state
        )
        # 3. compute context, alignment and cumulative alignment.
        prev_state = states.state
        if not self.enable_tflite_convertible:
            prev_alignment_history = states.alignment_history
        prev_max_alignments = states.max_alignments
        context, alignments, state = self.attention_layer(
            [attention_lstm_output, prev_state, prev_max_alignments], training=training,
        )
        # 4. run decoder lstm(s)
        decoder_lstms_input = tf.concat([attention_lstm_output, context], axis=-1)
        decoder_lstms_output, next_decoder_lstms_state = self.decoder_lstms(
            decoder_lstms_input, states.decoder_lstms_state
        )
        # 5. compute frame feature and stop token.
        projection_inputs = tf.concat([decoder_lstms_output, context], axis=-1)
        decoder_outputs = self.frame_projection(projection_inputs)
        stop_inputs = tf.concat([decoder_lstms_output, decoder_outputs], axis=-1)
        stop_tokens = self.stop_projection(stop_inputs)
        # 6. save alignment history to visualize.
        if self.enable_tflite_convertible:
            alignment_history = ()
        else:
            alignment_history = prev_alignment_history.write(states.time, alignments)
        # 7. return new states.
        new_states = TFTacotronDecoderCellState(
            attention_lstm_state=next_attention_lstm_state,
            decoder_lstms_state=next_decoder_lstms_state,
            time=states.time + 1,
            context=context,
            state=state,
            alignment_history=alignment_history,
            max_alignments=tf.argmax(alignments, -1, output_type=tf.int32),
        )
        return (decoder_outputs, stop_tokens), new_states
class TFTacotronDecoder(Decoder):
    """Tacotron-2 Decoder: drives the decoder cell with a sampler inside dynamic_decode."""
    def __init__(
        self,
        decoder_cell,
        decoder_sampler,
        output_layer=None,
        enable_tflite_convertible=False,
    ):
        """Initial variables."""
        self.cell = decoder_cell
        self.sampler = decoder_sampler
        self.output_layer = output_layer
        self.enable_tflite_convertible = enable_tflite_convertible
    def setup_decoder_init_state(self, decoder_init_state):
        # Must be called before decoding starts.
        self.initial_state = decoder_init_state
    def initialize(self, **kwargs):
        """Return (finished, first_inputs, initial_state) for dynamic_decode."""
        return self.sampler.initialize() + (self.initial_state,)
    @property
    def output_size(self):
        return TFDecoderOutput(
            mel_output=tf.nest.map_structure(
                lambda shape: tf.TensorShape(shape), self.cell.output_size
            ),
            token_output=tf.TensorShape(self.sampler.reduction_factor),
            sample_id=tf.TensorShape([1])
            if self.enable_tflite_convertible
            else self.sampler.sample_ids_shape,  # tf.TensorShape([])
        )
    @property
    def output_dtype(self):
        return TFDecoderOutput(tf.float32, tf.float32, self.sampler.sample_ids_dtype)
    @property
    def batch_size(self):
        return self.sampler._batch_size
    def step(self, time, inputs, state, training=False):
        """Perform one decoding step; returns (outputs, next_state, next_inputs, finished)."""
        (mel_outputs, stop_tokens), cell_state = self.cell(
            inputs, state, training=training
        )
        if self.output_layer is not None:
            mel_outputs = self.output_layer(mel_outputs)
        sample_ids = self.sampler.sample(
            time=time, outputs=mel_outputs, state=cell_state
        )
        (finished, next_inputs, next_state) = self.sampler.next_inputs(
            time=time,
            outputs=mel_outputs,
            state=cell_state,
            sample_ids=sample_ids,
            stop_token_prediction=stop_tokens,
            training=training,
        )
        outputs = TFDecoderOutput(mel_outputs, stop_tokens, sample_ids)
        return (outputs, next_state, next_inputs, finished)
class TFTacotron2(BaseModel):
    """Tensorflow tacotron-2 model."""

    def __init__(self, config, **kwargs):
        """Initialize tacotron-2 layers.

        Args:
            config: Tacotron-2 configuration object (provides ``n_mels``
                among other hyperparameters).
            **kwargs: Forwarded to ``BaseModel``; the optional boolean
                ``enable_tflite_convertible`` is popped from here first.
        """
        enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
        # Bugfix: ``super().__init__(self, **kwargs)`` passed ``self`` a second
        # time as a positional argument to tf.keras.Model.__init__; current TF
        # versions silently ignore it on the subclassed-model path, but it is
        # wrong and fragile across TF releases.
        super().__init__(**kwargs)
        self.encoder = TFTacotronEncoder(config, name="encoder")
        self.decoder_cell = TFTacotronDecoderCell(
            config,
            name="decoder_cell",
            enable_tflite_convertible=enable_tflite_convertible,
        )
        self.decoder = TFTacotronDecoder(
            self.decoder_cell,
            Tacotron2Sampler(config),
            enable_tflite_convertible=enable_tflite_convertible,
        )
        self.postnet = TFTacotronPostnet(config, name="post_net")
        self.post_projection = tf.keras.layers.Dense(
            units=config.n_mels, name="residual_projection"
        )
        self.use_window_mask = False
        self.maximum_iterations = 4000
        self.enable_tflite_convertible = enable_tflite_convertible
        self.config = config

    def setup_window(self, win_front, win_back):
        """Call only for inference.

        Enable attention window masking with the given widths (in encoder
        steps) in front of / behind the current max-attention position.
        """
        self.use_window_mask = True
        self.win_front = win_front
        self.win_back = win_back

    def setup_maximum_iterations(self, maximum_iterations):
        """Call only for inference. Cap the number of decoder steps."""
        self.maximum_iterations = maximum_iterations

    def _build(self):
        """Build all weights once by running a fake teacher-forced forward pass."""
        input_ids = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9]])
        input_lengths = np.array([9])
        speaker_ids = np.array([0])
        mel_outputs = np.random.normal(size=(1, 50, 80)).astype(np.float32)
        mel_lengths = np.array([50])
        self(
            input_ids,
            input_lengths,
            speaker_ids,
            mel_outputs,
            mel_lengths,
            10,
            training=True,
        )

    def call(
        self,
        input_ids,
        input_lengths,
        speaker_ids,
        mel_gts,
        mel_lengths,
        maximum_iterations=None,
        use_window_mask=False,
        win_front=2,
        win_back=3,
        training=False,
        **kwargs,
    ):
        """Teacher-forced forward pass.

        Args:
            input_ids: int tensor [B, T_in] of character/phoneme ids (0 = pad).
            input_lengths: int tensor [B] of valid input lengths.
            speaker_ids: int tensor [B] of speaker indices.
            mel_gts: float tensor of ground-truth mels for teacher forcing.
            mel_lengths: int tensor [B] of valid mel lengths.
            maximum_iterations: Optional cap on decoder steps.
            use_window_mask / win_front / win_back: Attention windowing knobs.
            training (bool): Training-mode flag for dropout etc.

        Returns:
            Tuple (decoder_outputs, mel_outputs, stop_token_prediction,
            alignment_history); alignment_history is ``()`` in TFLite mode.
        """
        # create input-mask based on input_lengths
        input_mask = tf.sequence_mask(
            input_lengths,
            maxlen=tf.reduce_max(input_lengths),
            name="input_sequence_masks",
        )
        # Encoder Step.
        encoder_hidden_states = self.encoder(
            [input_ids, speaker_ids, input_mask], training=training
        )
        batch_size = tf.shape(encoder_hidden_states)[0]
        alignment_size = tf.shape(encoder_hidden_states)[1]
        # Setup some initial placeholders for decoder step. Include:
        # 1. mel_gts, mel_lengths for teacher forcing mode.
        # 2. alignment_size for attention size.
        # 3. initial state for decoder cell.
        # 4. memory (encoder hidden state) for attention mechanism.
        self.decoder.sampler.setup_target(targets=mel_gts, mel_lengths=mel_lengths)
        self.decoder.cell.set_alignment_size(alignment_size)
        self.decoder.setup_decoder_init_state(
            self.decoder.cell.get_initial_state(batch_size)
        )
        self.decoder.cell.attention_layer.setup_memory(
            memory=encoder_hidden_states,
            memory_sequence_length=input_lengths,  # use for mask attention.
        )
        if use_window_mask:
            self.decoder.cell.attention_layer.setup_window(
                win_front=win_front, win_back=win_back
            )
        # run decode step.
        (
            (frames_prediction, stop_token_prediction, _),
            final_decoder_state,
            _,
        ) = dynamic_decode(
            self.decoder,
            maximum_iterations=maximum_iterations,
            enable_tflite_convertible=self.enable_tflite_convertible,
            training=training,
        )
        decoder_outputs = tf.reshape(
            frames_prediction, [batch_size, -1, self.config.n_mels]
        )
        stop_token_prediction = tf.reshape(stop_token_prediction, [batch_size, -1])
        # Postnet predicts a residual that is added to the decoder output.
        residual = self.postnet(decoder_outputs, training=training)
        residual_projection = self.post_projection(residual)
        mel_outputs = decoder_outputs + residual_projection
        if self.enable_tflite_convertible:
            # Strip all-zero (padded) frames so the TFLite graph emits only
            # real frames; alignment history is not tracked in this mode.
            mask = tf.math.not_equal(
                tf.cast(
                    tf.reduce_sum(tf.abs(decoder_outputs), axis=-1), dtype=tf.int32
                ),
                0,
            )
            decoder_outputs = tf.expand_dims(
                tf.boolean_mask(decoder_outputs, mask), axis=0
            )
            mel_outputs = tf.expand_dims(tf.boolean_mask(mel_outputs, mask), axis=0)
            alignment_history = ()
        else:
            # [B, T_in, T_out] attention weights for visualization / loss.
            alignment_history = tf.transpose(
                final_decoder_state.alignment_history.stack(), [1, 2, 0]
            )
        return decoder_outputs, mel_outputs, stop_token_prediction, alignment_history

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([None, None], dtype=tf.int32, name="input_ids"),
            tf.TensorSpec([None,], dtype=tf.int32, name="input_lengths"),
            tf.TensorSpec([None,], dtype=tf.int32, name="speaker_ids"),
        ],
    )
    def inference(self, input_ids, input_lengths, speaker_ids, **kwargs):
        """Free-running inference (no teacher forcing).

        Returns:
            Tuple (decoder_outputs, mel_outputs, stop_token_predictions,
            alignment_historys).
        """
        # create input-mask based on input_lengths
        input_mask = tf.sequence_mask(
            input_lengths,
            maxlen=tf.reduce_max(input_lengths),
            name="input_sequence_masks",
        )
        # Encoder Step.
        encoder_hidden_states = self.encoder(
            [input_ids, speaker_ids, input_mask], training=False
        )
        batch_size = tf.shape(encoder_hidden_states)[0]
        alignment_size = tf.shape(encoder_hidden_states)[1]
        # Setup some initial placeholders for decoder step. Include:
        # 1. batch_size for inference.
        # 2. alignment_size for attention size.
        # 3. initial state for decoder cell.
        # 4. memory (encoder hidden state) for attention mechanism.
        # 5. window front/back to solve long sentence synthesize problems. (call after setup memory.)
        self.decoder.sampler.set_batch_size(batch_size)
        self.decoder.cell.set_alignment_size(alignment_size)
        self.decoder.setup_decoder_init_state(
            self.decoder.cell.get_initial_state(batch_size)
        )
        self.decoder.cell.attention_layer.setup_memory(
            memory=encoder_hidden_states,
            memory_sequence_length=input_lengths,  # use for mask attention.
        )
        if self.use_window_mask:
            self.decoder.cell.attention_layer.setup_window(
                win_front=self.win_front, win_back=self.win_back
            )
        # run decode step.
        (
            (frames_prediction, stop_token_prediction, _),
            final_decoder_state,
            _,
        ) = dynamic_decode(
            self.decoder, maximum_iterations=self.maximum_iterations, training=False
        )
        decoder_outputs = tf.reshape(
            frames_prediction, [batch_size, -1, self.config.n_mels]
        )
        stop_token_predictions = tf.reshape(stop_token_prediction, [batch_size, -1])
        residual = self.postnet(decoder_outputs, training=False)
        residual_projection = self.post_projection(residual)
        mel_outputs = decoder_outputs + residual_projection
        alignment_historys = tf.transpose(
            final_decoder_state.alignment_history.stack(), [1, 2, 0]
        )
        return decoder_outputs, mel_outputs, stop_token_predictions, alignment_historys

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[
            tf.TensorSpec([1, None], dtype=tf.int32, name="input_ids"),
            tf.TensorSpec([1,], dtype=tf.int32, name="input_lengths"),
            tf.TensorSpec([1,], dtype=tf.int32, name="speaker_ids"),
        ],
    )
    def inference_tflite(self, input_ids, input_lengths, speaker_ids, **kwargs):
        """TFLite-convertible inference; batch size fixed to 1."""
        # create input-mask based on input_lengths
        input_mask = tf.sequence_mask(
            input_lengths,
            maxlen=tf.reduce_max(input_lengths),
            name="input_sequence_masks",
        )
        # Encoder Step.
        encoder_hidden_states = self.encoder(
            [input_ids, speaker_ids, input_mask], training=False
        )
        batch_size = tf.shape(encoder_hidden_states)[0]
        alignment_size = tf.shape(encoder_hidden_states)[1]
        # Setup some initial placeholders for decoder step. Include:
        # 1. batch_size for inference.
        # 2. alignment_size for attention size.
        # 3. initial state for decoder cell.
        # 4. memory (encoder hidden state) for attention mechanism.
        # 5. window front/back to solve long sentence synthesize problems. (call after setup memory.)
        self.decoder.sampler.set_batch_size(batch_size)
        self.decoder.cell.set_alignment_size(alignment_size)
        self.decoder.setup_decoder_init_state(
            self.decoder.cell.get_initial_state(batch_size)
        )
        self.decoder.cell.attention_layer.setup_memory(
            memory=encoder_hidden_states,
            memory_sequence_length=input_lengths,  # use for mask attention.
        )
        if self.use_window_mask:
            self.decoder.cell.attention_layer.setup_window(
                win_front=self.win_front, win_back=self.win_back
            )
        # run decode step.
        (
            (frames_prediction, stop_token_prediction, _),
            final_decoder_state,
            _,
        ) = dynamic_decode(
            self.decoder,
            maximum_iterations=self.maximum_iterations,
            enable_tflite_convertible=self.enable_tflite_convertible,
            training=False,
        )
        decoder_outputs = tf.reshape(
            frames_prediction, [batch_size, -1, self.config.n_mels]
        )
        stop_token_predictions = tf.reshape(stop_token_prediction, [batch_size, -1])
        residual = self.postnet(decoder_outputs, training=False)
        residual_projection = self.post_projection(residual)
        mel_outputs = decoder_outputs + residual_projection
        if self.enable_tflite_convertible:
            # Strip all-zero (padded) frames; alignment history is not tracked.
            mask = tf.math.not_equal(
                tf.cast(
                    tf.reduce_sum(tf.abs(decoder_outputs), axis=-1), dtype=tf.int32
                ),
                0,
            )
            decoder_outputs = tf.expand_dims(
                tf.boolean_mask(decoder_outputs, mask), axis=0
            )
            mel_outputs = tf.expand_dims(tf.boolean_mask(mel_outputs, mask), axis=0)
            alignment_historys = ()
        else:
            alignment_historys = tf.transpose(
                final_decoder_state.alignment_history.stack(), [1, 2, 0]
            )
        return decoder_outputs, mel_outputs, stop_token_predictions, alignment_historys
| 37,180 | 34.716619 | 112 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/mb_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The Multi-band MelGAN Authors , Minh Nguyen (@dathudeptrai) and Tomoki Hayashi (@kan-bayashi)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Compatible with https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/layers/pqmf.py.
"""Multi-band MelGAN Modules."""
import numpy as np
import tensorflow as tf
from scipy.signal import kaiser
from tensorflow_tts.models import BaseModel
from tensorflow_tts.models import TFMelGANGenerator
def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
    """Design prototype filter for PQMF.

    This method is based on `A Kaiser window approach for the design of prototype
    filters of cosine modulated filterbanks`_.

    Args:
        taps (int): The number of filter taps (must be even).
        cutoff_ratio (float): Cut-off frequency ratio, in (0, 1).
        beta (float): Beta coefficient for kaiser window.

    Returns:
        ndarray: Impluse response of prototype filter (taps + 1,).

    .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
        https://ieeexplore.ieee.org/abstract/document/681427

    """
    # check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
    assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."

    # make initial filter: ideal low-pass, h[n] = sin(wc*(n - T/2)) / (pi*(n - T/2))
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid="ignore"):
        h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) / (
            np.pi * (np.arange(taps + 1) - 0.5 * taps)
        )
    # fix nan due to indeterminate form at n == taps / 2; the limit of the
    # sinc term there is omega_c / pi == cutoff_ratio.
    h_i[taps // 2] = cutoff_ratio

    # apply kaiser window. Use numpy's kaiser window instead of the deprecated
    # scipy.signal.kaiser (removed in SciPy 1.13); both compute the identical
    # symmetric Kaiser window of length taps + 1.
    w = np.kaiser(taps + 1, beta)
    h = h_i * w

    return h
class TFPQMF(tf.keras.layers.Layer):
    """PQMF (pseudo-quadrature mirror filter bank) module.

    Splits a full-band waveform into ``subbands`` critically-sampled subband
    signals (``analysis``) and recombines them (``synthesis``).
    """

    def __init__(self, config, **kwargs):
        """Initialize PQMF module.

        Args:
            config (class): MultiBandMelGANGeneratorConfig; provides
                ``subbands``, ``taps``, ``cutoff_ratio`` and ``beta``.

        """
        super().__init__(**kwargs)
        subbands = config.subbands
        taps = config.taps
        cutoff_ratio = config.cutoff_ratio
        beta = config.beta

        # define filter coefficient: cosine-modulate the low-pass prototype
        # into one band-pass filter per subband (+/- pi/4 phase offsets for
        # the analysis vs. synthesis banks).
        h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
        h_analysis = np.zeros((subbands, len(h_proto)))
        h_synthesis = np.zeros((subbands, len(h_proto)))
        for k in range(subbands):
            h_analysis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    + (-1) ** k * np.pi / 4
                )
            )
            h_synthesis[k] = (
                2
                * h_proto
                * np.cos(
                    (2 * k + 1)
                    * (np.pi / (2 * subbands))
                    * (np.arange(taps + 1) - (taps / 2))
                    - (-1) ** k * np.pi / 4
                )
            )

        # [subbands, 1, taps + 1] == [filter_width, in_channels, out_channels]
        analysis_filter = np.expand_dims(h_analysis, 1)
        analysis_filter = np.transpose(analysis_filter, (2, 1, 0))

        synthesis_filter = np.expand_dims(h_synthesis, 0)
        synthesis_filter = np.transpose(synthesis_filter, (2, 1, 0))

        # filter for downsampling & upsampling: identity on channel 0 only,
        # used as a strided conv (decimation) / transposed conv (expansion).
        updown_filter = np.zeros((subbands, subbands, subbands), dtype=np.float32)
        for k in range(subbands):
            updown_filter[0, k, k] = 1.0

        self.subbands = subbands
        self.taps = taps
        self.analysis_filter = analysis_filter.astype(np.float32)
        self.synthesis_filter = synthesis_filter.astype(np.float32)
        self.updown_filter = updown_filter.astype(np.float32)

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[tf.TensorSpec(shape=[None, None, 1], dtype=tf.float32)],
    )
    def analysis(self, x):
        """Analysis with PQMF.

        Args:
            x (Tensor): Input tensor (B, T, 1).

        Returns:
            Tensor: Output tensor (B, T // subbands, subbands).

        """
        # symmetric zero-pad so the filtered length matches T, then filter
        # with the analysis bank and decimate by ``subbands``.
        x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]])
        x = tf.nn.conv1d(x, self.analysis_filter, stride=1, padding="VALID")
        x = tf.nn.conv1d(x, self.updown_filter, stride=self.subbands, padding="VALID")
        return x

    @tf.function(
        experimental_relax_shapes=True,
        input_signature=[tf.TensorSpec(shape=[None, None, None], dtype=tf.float32)],
    )
    def synthesis(self, x):
        """Synthesis with PQMF.

        Args:
            x (Tensor): Input tensor (B, T // subbands, subbands).

        Returns:
            Tensor: Output tensor (B, T, 1).

        """
        # upsample by ``subbands`` (gain-compensated by the * subbands factor),
        # then filter with the synthesis bank to rebuild the full-band signal.
        x = tf.nn.conv1d_transpose(
            x,
            self.updown_filter * self.subbands,
            strides=self.subbands,
            output_shape=(
                tf.shape(x)[0],
                tf.shape(x)[1] * self.subbands,
                self.subbands,
            ),
        )
        x = tf.pad(x, [[0, 0], [self.taps // 2, self.taps // 2], [0, 0]])
        return tf.nn.conv1d(x, self.synthesis_filter, stride=1, padding="VALID")
class TFMBMelGANGenerator(TFMelGANGenerator):
    """Tensorflow MBMelGAN generator module.

    Extends the MelGAN generator with a PQMF synthesis bank that merges the
    generator's subband outputs into one full-band waveform.
    """

    def __init__(self, config, **kwargs):
        # keep the PQMF filter bank in float32 regardless of mixed precision.
        super().__init__(config, **kwargs)
        self.pqmf = TFPQMF(config=config, dtype=tf.float32, name="pqmf")

    def call(self, mels, **kwargs):
        """Calculate forward propagation.

        Args:
            mels (Tensor): Input mel spectrogram (B, T, channels)

        Returns:
            Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)

        """
        return self.inference(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        # melgan (built by the parent class) emits subband audio; PQMF
        # synthesis recombines the subbands into a single waveform.
        mb_audios = self.melgan(mels)
        return self.pqmf.synthesis(mb_audios)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        # same as `inference`, but with batch size fixed to 1 for TFLite.
        mb_audios = self.melgan(mels)
        return self.pqmf.synthesis(mb_audios)
| 6,890 | 34.704663 | 110 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/hifigan.py | # -*- coding: utf-8 -*-
# Copyright 2020 The Hifigan Authors and TensorflowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hifi Modules."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models.melgan import TFReflectionPad1d
from tensorflow_tts.models.melgan import TFConvTranspose1d
from tensorflow_tts.utils import GroupConv1D
from tensorflow_tts.utils import WeightNormalization
from tensorflow_tts.models import BaseModel
from tensorflow_tts.models import TFMelGANGenerator
class TFHifiResBlock(tf.keras.layers.Layer):
    """Tensorflow Hifigan resblock 1 module."""

    def __init__(
        self,
        kernel_size,
        filters,
        dilation_rate,
        use_bias,
        nonlinear_activation,
        nonlinear_activation_params,
        is_weight_norm,
        initializer_seed,
        **kwargs
    ):
        """Initialize TFHifiResBlock module.

        Args:
            kernel_size (int): Kernel size.
            filters (int): Number of filters.
            dilation_rate (list): List dilation rate.
            use_bias (bool): Whether to add bias parameter in convolution layers.
            nonlinear_activation (str): Activation function module name.
            nonlinear_activation_params (dict): Hyperparameters for activation function.
            is_weight_norm (bool): Whether to use weight norm or not.
            initializer_seed (int): NOTE(review): currently unused by this
                layer — confirm whether a kernel initializer should be wired in.
        """
        super().__init__(**kwargs)
        self.blocks_1 = []
        self.blocks_2 = []
        for i in range(len(dilation_rate)):
            # dilated branch: reflection padding keeps the "same" length.
            self.blocks_1.append(
                [
                    TFReflectionPad1d((kernel_size - 1) // 2 * dilation_rate[i]),
                    tf.keras.layers.Conv1D(
                        filters=filters,
                        kernel_size=kernel_size,
                        dilation_rate=dilation_rate[i],
                        use_bias=use_bias,
                    ),
                ]
            )
            # plain (dilation 1) branch applied after the dilated one.
            self.blocks_2.append(
                [
                    TFReflectionPad1d((kernel_size - 1) // 2 * 1),
                    tf.keras.layers.Conv1D(
                        filters=filters,
                        kernel_size=kernel_size,
                        dilation_rate=1,
                        use_bias=use_bias,
                    ),
                ]
            )

        self.activation = getattr(tf.keras.layers, nonlinear_activation)(
            **nonlinear_activation_params
        )

        # apply weightnorm.
        # Bugfix: blocks_1/blocks_2 hold [pad, conv] *pairs* (lists). Applying
        # weight norm to the outer list accessed ``.name`` on a list, raised
        # AttributeError, and the broad ``except`` in _apply_weightnorm
        # swallowed it — so weight norm was silently never applied. Apply it
        # to each inner [pad, conv] pair instead.
        if is_weight_norm:
            for pair in self.blocks_1:
                self._apply_weightnorm(pair)
            for pair in self.blocks_2:
                self._apply_weightnorm(pair)

    def call(self, x, training=False):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input tensor (B, T, C).

        Returns:
            Tensor: Output tensor (B, T, C).

        """
        for c1, c2 in zip(self.blocks_1, self.blocks_2):
            xt = self.activation(x)
            for c in c1:
                xt = c(xt)
            xt = self.activation(xt)
            for c in c2:
                xt = c(xt)
            x = xt + x  # residual connection
        return x

    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFMultiHifiResBlock(tf.keras.layers.Layer):
    """Average the outputs of several parallel HiFi-GAN residual blocks."""

    def __init__(self, list_resblock, **kwargs):
        """Store the parallel residual blocks whose outputs are averaged."""
        super().__init__(**kwargs)
        self.list_resblock = list_resblock

    def call(self, x, training=False):
        """Run every residual block on ``x`` and return the mean of their outputs."""
        accumulated = None
        for block in self.list_resblock:
            out = block(x, training=training)
            accumulated = out if accumulated is None else accumulated + out
        return accumulated / len(self.list_resblock)
class TFHifiGANGenerator(BaseModel):
    """HiFi-GAN generator: upsamples a mel spectrogram to a waveform."""

    def __init__(self, config, **kwargs):
        """Build the generator stack from a HifiGANGeneratorConfig.

        The layer creation order below fixes the auto-assigned keras layer
        names, which checkpoints depend on — do not reorder.
        """
        super().__init__(**kwargs)

        # check hyper parameter is valid or not
        assert (
            config.stacks
            == len(config.stack_kernel_size)
            == len(config.stack_dilation_rate)
        )

        # add initial layer
        layers = []
        layers += [
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="first_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.filters,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
            ),
        ]

        for i, upsample_scale in enumerate(config.upsample_scales):
            # add upsampling layer; channel count halves at every stage.
            layers += [
                getattr(tf.keras.layers, config.nonlinear_activation)(
                    **config.nonlinear_activation_params
                ),
                TFConvTranspose1d(
                    filters=config.filters // (2 ** (i + 1)),
                    kernel_size=upsample_scale * 2,
                    strides=upsample_scale,
                    padding="same",
                    is_weight_norm=config.is_weight_norm,
                    initializer_seed=config.initializer_seed,
                    name="conv_transpose_._{}".format(i),
                ),
            ]

            # add residual stack layer: ``stacks`` parallel resblocks whose
            # outputs TFMultiHifiResBlock averages.
            layers += [
                TFMultiHifiResBlock(
                    list_resblock=[
                        TFHifiResBlock(
                            kernel_size=config.stack_kernel_size[j],
                            filters=config.filters // (2 ** (i + 1)),
                            dilation_rate=config.stack_dilation_rate[j],
                            use_bias=config.use_bias,
                            nonlinear_activation=config.nonlinear_activation,
                            nonlinear_activation_params=config.nonlinear_activation_params,
                            is_weight_norm=config.is_weight_norm,
                            initializer_seed=config.initializer_seed,
                            name="hifigan_resblock_._{}".format(j),
                        )
                        for j in range(config.stacks)
                    ],
                    name="multi_hifigan_resblock_._{}".format(i),
                )
            ]
        # add final layer (float32 so mixed-precision output stays stable)
        layers += [
            getattr(tf.keras.layers, config.nonlinear_activation)(
                **config.nonlinear_activation_params
            ),
            TFReflectionPad1d(
                (config.kernel_size - 1) // 2,
                padding_type=config.padding_type,
                name="last_reflect_padding",
            ),
            tf.keras.layers.Conv1D(
                filters=config.out_channels,
                kernel_size=config.kernel_size,
                use_bias=config.use_bias,
                dtype=tf.float32,
            ),
        ]
        if config.use_final_nolinear_activation:
            layers += [tf.keras.layers.Activation("tanh", dtype=tf.float32)]

        # NOTE(review): this only wraps the plain Conv1D layers above;
        # TFConvTranspose1d/TFHifiResBlock handle weight norm internally via
        # their is_weight_norm argument — presumably intended, confirm.
        if config.is_weight_norm is True:
            self._apply_weightnorm(layers)

        self.hifigan = tf.keras.models.Sequential(layers)

    def call(self, mels, **kwargs):
        """Calculate forward propagation.

        Args:
            mels (Tensor): Input mel spectrogram (B, T, channels)

        Returns:
            Tensor: Output tensor (B, T ** prod(upsample_scales), out_channels)

        """
        return self.inference(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference(self, mels):
        return self.hifigan(mels)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[1, None, 80], dtype=tf.float32, name="mels")
        ]
    )
    def inference_tflite(self, mels):
        # same as `inference`, but with batch size fixed to 1 for TFLite.
        return self.hifigan(mels)

    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                if "conv1d" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass

    def _build(self):
        """Build model by passing fake input."""
        fake_mels = tf.random.uniform(shape=[1, 100, 80], dtype=tf.float32)
        self(fake_mels)
class TFHifiGANPeriodDiscriminator(tf.keras.layers.Layer):
    """Tensorflow Hifigan period discriminator module.

    Reshapes the waveform into (T // period, period) columns and runs 2D
    convolutions over it, so each discriminator focuses on one periodicity.
    """

    def __init__(
        self,
        period,
        out_channels=1,
        n_layers=5,
        kernel_size=5,
        strides=3,
        filters=8,
        filter_scales=4,
        max_filters=1024,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        initializer_seed=42,
        is_weight_norm=False,
        **kwargs
    ):
        """Initialize the period discriminator.

        Args:
            period (int): Periodicity (in samples) this discriminator inspects.
            out_channels (int): Channels of the final score map.
            n_layers (int): Number of strided Conv2D layers.
            kernel_size / strides / filters / filter_scales / max_filters:
                Conv2D stack hyperparameters; channel count grows by
                ``filter_scales`` per layer, capped at ``max_filters``.
            nonlinear_activation (str) / nonlinear_activation_params (dict):
                Activation between conv layers.
            initializer_seed (int): NOTE(review): currently unused — confirm
                whether an explicit kernel initializer should be wired in.
            is_weight_norm (bool): Whether to wrap convs in weight norm.
        """
        super().__init__(**kwargs)
        self.period = period
        self.out_filters = out_channels
        self.convs = []
        for i in range(n_layers):
            self.convs.append(
                tf.keras.layers.Conv2D(
                    filters=min(filters * (filter_scales ** (i + 1)), max_filters),
                    kernel_size=(kernel_size, 1),
                    strides=(strides, 1),
                    padding="same",
                )
            )
        self.conv_post = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=(3, 1), padding="same",
        )
        self.activation = getattr(tf.keras.layers, nonlinear_activation)(
            **nonlinear_activation_params
        )
        if is_weight_norm:
            self._apply_weightnorm(self.convs)
            self.conv_post = WeightNormalization(self.conv_post)

    def call(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, T, 1).

        Returns:
            List: List of output tensors.

        """
        # reflect-pad T up to a multiple of ``period``, then fold the signal
        # into a (T // period, period) 2D map per channel.
        shape = tf.shape(x)
        n_pad = tf.convert_to_tensor(0, dtype=tf.int32)
        if shape[1] % self.period != 0:
            n_pad = self.period - (shape[1] % self.period)
            x = tf.pad(x, [[0, 0], [0, n_pad], [0, 0]], "REFLECT")
        x = tf.reshape(
            x, [shape[0], (shape[1] + n_pad) // self.period, self.period, x.shape[2]]
        )
        for layer in self.convs:
            x = layer(x)
            x = self.activation(x)
        x = self.conv_post(x)
        x = tf.reshape(x, [shape[0], -1, self.out_filters])
        return [x]

    def _apply_weightnorm(self, list_layers):
        """Try apply weightnorm for all layer in list_layers."""
        for i in range(len(list_layers)):
            try:
                layer_name = list_layers[i].name.lower()
                # Bugfix: this discriminator stacks Conv2D layers (keras names
                # them "conv2d..."), but the old check only matched "conv1d",
                # so weight norm was silently never applied to self.convs.
                if "conv" in layer_name or "dense" in layer_name:
                    list_layers[i] = WeightNormalization(list_layers[i])
            except Exception:
                pass
class TFHifiGANMultiPeriodDiscriminator(BaseModel):
    """Tensorflow Hifigan Multi Period discriminator module.

    Bundles one TFHifiGANPeriodDiscriminator per entry in
    ``config.period_scales`` and runs them all on the same signal.
    """

    def __init__(self, config, **kwargs):
        """Create one period discriminator per configured period scale."""
        super().__init__(**kwargs)
        self.discriminator = [
            TFHifiGANPeriodDiscriminator(
                period,
                out_channels=config.out_channels,
                n_layers=config.n_layers,
                kernel_size=config.kernel_size,
                strides=config.strides,
                filters=config.filters,
                filter_scales=config.filter_scales,
                max_filters=config.max_filters,
                nonlinear_activation=config.nonlinear_activation,
                nonlinear_activation_params=config.nonlinear_activation_params,
                initializer_seed=config.initializer_seed,
                is_weight_norm=config.is_weight_norm,
                name="hifigan_period_discriminator_._{}".format(idx),
            )
            for idx, period in enumerate(config.period_scales)
        ]

    def call(self, x):
        """Calculate forward propagation.

        Args:
            x (Tensor): Input noise signal (B, T, 1).

        Returns:
            List: list of each discriminator outputs

        """
        return [discriminator(x) for discriminator in self.discriminator]
| 13,272 | 33.928947 | 91 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/__init__.py | from tensorflow_tts.models.base_model import BaseModel
from tensorflow_tts.models.fastspeech import TFFastSpeech
from tensorflow_tts.models.fastspeech2 import TFFastSpeech2
from tensorflow_tts.models.melgan import (
TFMelGANDiscriminator,
TFMelGANGenerator,
TFMelGANMultiScaleDiscriminator,
)
from tensorflow_tts.models.mb_melgan import TFPQMF
from tensorflow_tts.models.mb_melgan import TFMBMelGANGenerator
from tensorflow_tts.models.hifigan import (
TFHifiGANGenerator,
TFHifiGANMultiPeriodDiscriminator,
TFHifiGANPeriodDiscriminator
)
from tensorflow_tts.models.tacotron2 import TFTacotron2
from tensorflow_tts.models.parallel_wavegan import TFParallelWaveGANGenerator
from tensorflow_tts.models.parallel_wavegan import TFParallelWaveGANDiscriminator
| 778 | 40 | 81 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech2 Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Model modules for FastSpeech2."""
import tensorflow as tf
from tensorflow_tts.models.fastspeech import TFFastSpeech, get_initializer
class TFFastSpeechVariantPredictor(tf.keras.layers.Layer):
    """FastSpeech variant (duration / f0 / energy) predictor module.

    Conv stack + dense head that maps encoder hidden states to one scalar
    prediction per input position.
    """

    def __init__(self, config, **kwargs):
        """Init variables.

        Args:
            config: FastSpeech2 configuration providing the
                ``variant_predictor_*`` hyperparameters, ``layer_norm_eps``
                and ``n_speakers``.
        """
        super().__init__(**kwargs)
        self.conv_layers = []
        # each iteration appends one Conv1D -> ReLU -> LayerNorm -> Dropout
        # group; the explicit names keep checkpoint compatibility.
        for i in range(config.variant_prediction_num_conv_layers):
            self.conv_layers.append(
                tf.keras.layers.Conv1D(
                    config.variant_predictor_filter,
                    config.variant_predictor_kernel_size,
                    padding="same",
                    name="conv_._{}".format(i),
                )
            )
            self.conv_layers.append(tf.keras.layers.Activation(tf.nn.relu))
            self.conv_layers.append(
                tf.keras.layers.LayerNormalization(
                    epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
                )
            )
            self.conv_layers.append(
                tf.keras.layers.Dropout(config.variant_predictor_dropout_rate)
            )
        self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
        self.output_layer = tf.keras.layers.Dense(1)

        # speaker conditioning is only created for multi-speaker configs.
        if config.n_speakers > 1:
            self.decoder_speaker_embeddings = tf.keras.layers.Embedding(
                config.n_speakers,
                config.encoder_self_attention_params.hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.encoder_self_attention_params.hidden_size,
                name="speaker_fc",
            )

        self.config = config

    def call(self, inputs, training=False):
        """Call logic.

        Args:
            inputs: Tuple of (encoder_hidden_states, speaker_ids,
                attention_mask); the mask marks valid (non-pad) positions.

        Returns:
            Tensor: one prediction per position, zeroed on padded positions.
        """
        encoder_hidden_states, speaker_ids, attention_mask = inputs
        attention_mask = tf.cast(
            tf.expand_dims(attention_mask, 2), encoder_hidden_states.dtype
        )

        if self.config.n_speakers > 1:
            speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time axis.
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            encoder_hidden_states += extended_speaker_features

        # mask encoder hidden states
        masked_encoder_hidden_states = encoder_hidden_states * attention_mask

        # pass though first layer
        outputs = self.conv_layers_sequence(masked_encoder_hidden_states)
        outputs = self.output_layer(outputs)
        masked_outputs = outputs * attention_mask
        outputs = tf.squeeze(masked_outputs, -1)
        return outputs
class TFFastSpeech2(TFFastSpeech):
"""TF Fastspeech module."""
    def __init__(self, config, **kwargs):
        """Init layers for fastspeech.

        Extends TFFastSpeech with f0 and energy predictors plus conv
        embeddings that inject predicted (or ground-truth) f0/energy back
        into the encoder hidden states.
        """
        super().__init__(config, **kwargs)
        # kept in float32 so predictor outputs stay stable under mixed precision.
        self.f0_predictor = TFFastSpeechVariantPredictor(
            config, dtype=tf.float32, name="f0_predictor"
        )
        self.energy_predictor = TFFastSpeechVariantPredictor(
            config, dtype=tf.float32, name="energy_predictor",
        )
        self.duration_predictor = TFFastSpeechVariantPredictor(
            config, dtype=tf.float32, name="duration_predictor"
        )

        # define f0_embeddings and energy_embeddings: project the scalar
        # f0/energy tracks up to the encoder hidden size with a wide conv.
        self.f0_embeddings = tf.keras.layers.Conv1D(
            filters=config.encoder_self_attention_params.hidden_size,
            kernel_size=9,
            padding="same",
            name="f0_embeddings",
        )
        # dropout on these embeddings is applied even at inference (see call).
        self.f0_dropout = tf.keras.layers.Dropout(0.5)
        self.energy_embeddings = tf.keras.layers.Conv1D(
            filters=config.encoder_self_attention_params.hidden_size,
            kernel_size=9,
            padding="same",
            name="energy_embeddings",
        )
        self.energy_dropout = tf.keras.layers.Dropout(0.5)
def _build(self):
"""Dummy input for building model."""
# fake inputs
input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
speaker_ids = tf.convert_to_tensor([0], tf.int32)
duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
f0_gts = tf.convert_to_tensor(
[[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]], tf.float32
)
energy_gts = tf.convert_to_tensor(
[[10, 10, 10, 10, 10, 10, 10, 10, 10, 10]], tf.float32
)
self(
input_ids=input_ids,
speaker_ids=speaker_ids,
duration_gts=duration_gts,
f0_gts=f0_gts,
energy_gts=energy_gts,
)
    def call(
        self,
        input_ids,
        speaker_ids,
        duration_gts,
        f0_gts,
        energy_gts,
        training=False,
        **kwargs,
    ):
        """Call logic (teacher-forced training pass).

        Args:
            input_ids: int tensor [B, T_in] of token ids (0 = padding).
            speaker_ids: int tensor [B] of speaker indices.
            duration_gts: int tensor [B, T_in] of ground-truth durations.
            f0_gts: float tensor [B, T_in] of ground-truth f0 values.
            energy_gts: float tensor [B, T_in] of ground-truth energy values.
            training (bool): Training-mode flag.

        Returns:
            Tuple (mels_before, mels_after, duration_outputs, f0_outputs,
            energy_outputs).
        """
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=training
        )
        last_encoder_hidden_states = encoder_output[0]

        # energy predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
        # rather than just use last_hidden_states of encoder for energy_predictor.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask]
        )  # [batch_size, length]
        f0_outputs = self.f0_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=training
        )
        energy_outputs = self.energy_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=training
        )

        # during training the *ground-truth* f0/energy tracks are embedded and
        # fed forward; the predictor outputs are only used for the loss.
        f0_embedding = self.f0_embeddings(
            tf.expand_dims(f0_gts, 2)
        )  # [barch_size, mel_length, feature]
        energy_embedding = self.energy_embeddings(
            tf.expand_dims(energy_gts, 2)
        )  # [barch_size, mel_length, feature]

        # apply dropout both training/inference
        f0_embedding = self.f0_dropout(f0_embedding, training=True)
        energy_embedding = self.energy_dropout(energy_embedding, training=True)

        # sum features
        last_encoder_hidden_states += f0_embedding + energy_embedding

        # expand encoder states to mel length using ground-truth durations.
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_gts], training=training
        )

        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks

        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=training,
        )
        last_decoder_hidden_states = decoder_output[0]

        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mels_before = self.mel_dense(last_decoder_hidden_states)
        mels_after = (
            self.postnet([mels_before, encoder_masks], training=training) + mels_before
        )

        outputs = (
            mels_before,
            mels_after,
            duration_outputs,
            f0_outputs,
            energy_outputs,
        )
        return outputs
    def _inference(
        self, input_ids, speaker_ids, speed_ratios, f0_ratios, energy_ratios, **kwargs,
    ):
        """Inference: predict duration/f0/energy, then decode mel spectrograms.

        Args:
            input_ids: int32 [batch_size, length] token ids (0 = padding).
            speaker_ids: int32 [batch_size] speaker indices.
            speed_ratios: float32 [batch_size], scales predicted durations.
            f0_ratios: float32 [batch_size], scales predicted f0.
            energy_ratios: float32 [batch_size], scales predicted energy.

        Returns:
            Tuple (mel_before, mel_after, duration_outputs, f0_outputs,
            energy_outputs).
        """
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]
        # expand ratios to [B, 1] so they broadcast over the time axis
        speed_ratios = tf.expand_dims(speed_ratios, 1)  # [B, 1]
        f0_ratios = tf.expand_dims(f0_ratios, 1)  # [B, 1]
        energy_ratios = tf.expand_dims(energy_ratios, 1)  # [B, 1]
        # duration predictor, here use last_encoder_hidden_states; more
        # encoder hidden-state layers could be used instead of just the last.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask]
        )  # [batch_size, length]
        # Invert the predictor's output with exp(x) - 1 and clamp negatives
        # (assumes the predictor was trained on log(1 + duration) targets --
        # confirm against the training pipeline), then apply the speed ratio.
        duration_outputs = tf.nn.relu(tf.math.exp(duration_outputs) - 1.0)
        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )
        f0_outputs = self.f0_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=False
        )
        f0_outputs *= f0_ratios
        energy_outputs = self.energy_predictor(
            [last_encoder_hidden_states, speaker_ids, attention_mask], training=False
        )
        energy_outputs *= energy_ratios
        # NOTE: training=True is intentional -- this model applies f0/energy
        # embedding dropout at both training and inference time (see the
        # matching comment in the training path).
        f0_embedding = self.f0_dropout(
            self.f0_embeddings(tf.expand_dims(f0_outputs, 2)), training=True
        )
        energy_embedding = self.energy_dropout(
            self.energy_embeddings(tf.expand_dims(energy_outputs, 2)), training=True
        )
        # sum f0/energy features into the encoder states before length regulation
        last_encoder_hidden_states += f0_embedding + energy_embedding
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )
        # create decoder positional embedding (positions start at 1; 0 = pad)
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        # postnet predicts a residual refinement added on top of mel_before
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs, f0_outputs, energy_outputs)
        return outputs
def setup_inference_fn(self):
self.inference = tf.function(
self._inference,
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec(shape=[None,], dtype=tf.int32, name="speaker_ids"),
tf.TensorSpec(shape=[None,], dtype=tf.float32, name="speed_ratios"),
tf.TensorSpec(shape=[None,], dtype=tf.float32, name="f0_ratios"),
tf.TensorSpec(shape=[None,], dtype=tf.float32, name="energy_ratios"),
],
)
self.inference_tflite = tf.function(
self._inference,
experimental_relax_shapes=True,
input_signature=[
tf.TensorSpec(shape=[1, None], dtype=tf.int32, name="input_ids"),
tf.TensorSpec(shape=[1,], dtype=tf.int32, name="speaker_ids"),
tf.TensorSpec(shape=[1,], dtype=tf.float32, name="speed_ratios"),
tf.TensorSpec(shape=[1,], dtype=tf.float32, name="f0_ratios"),
tf.TensorSpec(shape=[1,], dtype=tf.float32, name="energy_ratios"),
],
)
| 12,399 | 38.616613 | 100 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/models/fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 The FastSpeech Authors, The HuggingFace Inc. team and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Model modules for FastSpeech."""
import numpy as np
import tensorflow as tf
from tensorflow_tts.models import BaseModel
def get_initializer(initializer_range=0.02):
    """Build a truncated-normal weight initializer.

    Args:
        initializer_range: float, standard deviation of the distribution.

    Returns:
        A ``tf.keras.initializers.TruncatedNormal`` with that stddev.
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
def gelu(x):
    """Gaussian Error Linear Unit (exact erf formulation)."""
    gaussian_cdf = 0.5 * (1.0 + tf.math.erf(x / tf.math.sqrt(2.0)))
    return x * gaussian_cdf
def gelu_new(x):
    """Smoother GELU variant using the tanh approximation."""
    approx_cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
    return x * approx_cdf
def swish(x):
    """Swish (SiLU) activation: ``x * sigmoid(x)``."""
    return tf.nn.swish(x)
def mish(x):
    """Mish activation: ``x * tanh(softplus(x))``."""
    return x * tf.math.tanh(tf.math.softplus(x))
# Map activation names (as used in model configs, e.g. ``config.hidden_act``)
# to callables.
ACT2FN = {
    "identity": tf.keras.layers.Activation("linear"),
    "tanh": tf.keras.layers.Activation("tanh"),
    "gelu": tf.keras.layers.Activation(gelu),
    "relu": tf.keras.activations.relu,
    "swish": tf.keras.layers.Activation(swish),
    "gelu_new": tf.keras.layers.Activation(gelu_new),
    "mish": tf.keras.layers.Activation(mish),
}
class TFEmbedding(tf.keras.layers.Embedding):
    """Embedding layer whose lookup is a plain ``tf.gather``.

    Faster than the base ``tf.keras.layers.Embedding.call``; weights and
    construction are inherited unchanged (the redundant pass-through
    ``__init__`` of the original has been dropped -- it only forwarded its
    arguments to ``super().__init__``).
    """

    def call(self, inputs):
        """Look up embedding rows for integer ``inputs`` of any shape."""
        # Cast defensively: callers may pass int64 (or other) id tensors.
        inputs = tf.cast(inputs, tf.int32)
        outputs = tf.gather(self.embeddings, inputs)
        return outputs
class TFFastSpeechEmbeddings(tf.keras.layers.Layer):
    """Construct charactor/phoneme/positional/speaker embeddings."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.hidden_size = config.encoder_self_attention_params.hidden_size
        self.initializer_range = config.initializer_range
        self.config = config
        # Frozen sinusoidal position table; the extra row (index 0) is the
        # zeroed "pad" position, so valid positions are 1..max_position.
        self.position_embeddings = TFEmbedding(
            config.max_position_embeddings + 1,
            self.hidden_size,
            weights=[
                self._sincos_embedding(
                    self.hidden_size, self.config.max_position_embeddings
                )
            ],
            name="position_embeddings",
            trainable=False,
        )
        # Speaker conditioning only exists for multi-speaker models.
        if config.n_speakers > 1:
            self.encoder_speaker_embeddings = TFEmbedding(
                config.n_speakers,
                self.hidden_size,
                embeddings_initializer=get_initializer(self.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=self.hidden_size, name="speaker_fc"
            )
    def build(self, input_shape):
        """Build shared charactor/phoneme embedding layers."""
        # Created via add_weight (not a Layer) so the table can be gathered
        # from directly in _embedding.
        with tf.name_scope("charactor_embeddings"):
            self.charactor_embeddings = self.add_weight(
                "weight",
                shape=[self.vocab_size, self.hidden_size],
                initializer=get_initializer(self.initializer_range),
            )
        super().build(input_shape)
    def call(self, inputs, training=False):
        """Get charactor embeddings of inputs.
        Args:
            1. charactor, Tensor (int32) shape [batch_size, length].
            2. speaker_id, Tensor (int32) shape [batch_size]
        Returns:
            Tensor (float32) shape [batch_size, length, embedding_size].
        """
        return self._embedding(inputs, training=training)
    def _embedding(self, inputs, training=False):
        """Applies embedding based on inputs tensor."""
        input_ids, speaker_ids = inputs
        input_shape = tf.shape(input_ids)
        seq_length = input_shape[1]
        # Positions start at 1; index 0 is reserved for padding.
        position_ids = tf.range(1, seq_length + 1, dtype=tf.int32)[tf.newaxis, :]
        # create embeddings
        inputs_embeds = tf.gather(self.charactor_embeddings, input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        # sum embedding
        embeddings = inputs_embeds + tf.cast(position_embeddings, inputs_embeds.dtype)
        if self.config.n_speakers > 1:
            speaker_embeddings = self.encoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast over the time dimension
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            embeddings += extended_speaker_features
        return embeddings
    def _sincos_embedding(
        self, hidden_size, max_positional_embedding,
    ):
        """Build the transformer sin/cos positional table.
        Shape [max_positional_embedding + 1, hidden_size]; row 0 is zeroed
        out and serves as the padding position.
        """
        position_enc = np.array(
            [
                [
                    pos / np.power(10000, 2.0 * (i // 2) / hidden_size)
                    for i in range(hidden_size)
                ]
                for pos in range(max_positional_embedding + 1)
            ]
        )
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
        # pad embedding.
        position_enc[0] = 0.0
        return position_enc
    def resize_positional_embeddings(self, new_size):
        """Rebuild the frozen position table for a new maximum length."""
        self.position_embeddings = TFEmbedding(
            new_size + 1,
            self.hidden_size,
            weights=[self._sincos_embedding(self.hidden_size, new_size)],
            name="position_embeddings",
            trainable=False,
        )
class TFFastSpeechSelfAttention(tf.keras.layers.Layer):
    """Self attention module for fastspeech."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        # Hidden size must split evenly across the attention heads.
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.output_attentions = config.output_attentions
        self.num_attention_heads = config.num_attention_heads
        # Total projection width across all heads.
        self.all_head_size = self.num_attention_heads * config.attention_head_size
        self.query = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query",
        )
        self.key = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key",
        )
        self.value = tf.keras.layers.Dense(
            self.all_head_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value",
        )
        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.config = config
    def transpose_for_scores(self, x, batch_size):
        """Reshape [B, T, all_head_size] -> [B, heads, T, head_size]."""
        x = tf.reshape(
            x,
            (batch_size, -1, self.num_attention_heads, self.config.attention_head_size),
        )
        return tf.transpose(x, perm=[0, 2, 1, 3])
    def call(self, inputs, training=False):
        """Compute scaled dot-product self-attention.
        Args:
            inputs: [hidden_states, attention_mask] where attention_mask
                is 1 for real positions and 0 for padding.
        Returns:
            (context_layer,) plus attention_probs when output_attentions.
        """
        hidden_states, attention_mask = inputs
        batch_size = tf.shape(hidden_states)[0]
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)
        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
        attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
        dk = tf.cast(
            tf.shape(key_layer)[-1], attention_scores.dtype
        )  # scale attention_scores by sqrt(head size)
        attention_scores = attention_scores / tf.math.sqrt(dk)
        if attention_mask is not None:
            # extended_attention_masks for self attention encoder: padded
            # positions get a large negative bias so softmax gives them ~0.
            extended_attention_mask = attention_mask[:, tf.newaxis, tf.newaxis, :]
            extended_attention_mask = tf.cast(
                extended_attention_mask, attention_scores.dtype
            )
            extended_attention_mask = (1.0 - extended_attention_mask) * -1e9
            attention_scores = attention_scores + extended_attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = tf.nn.softmax(attention_scores, axis=-1)
        attention_probs = self.dropout(attention_probs, training=training)
        context_layer = tf.matmul(attention_probs, value_layer)
        # Merge heads back: [B, heads, T, head_size] -> [B, T, all_head_size].
        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
        context_layer = tf.reshape(context_layer, (batch_size, -1, self.all_head_size))
        outputs = (
            (context_layer, attention_probs)
            if self.output_attentions
            else (context_layer,)
        )
        return outputs
class TFFastSpeechSelfOutput(tf.keras.layers.Layer):
    """Projection + dropout + residual layer-norm after self-attention."""

    def __init__(self, config, **kwargs):
        """Create the dense/layer-norm/dropout sublayers from ``config``."""
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="LayerNorm"
        )
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def call(self, inputs, training=False):
        """Project the attention output and add the residual connection."""
        attn_output, residual = inputs
        projected = self.dense(attn_output)
        projected = self.dropout(projected, training=training)
        return self.LayerNorm(projected + residual)
class TFFastSpeechAttention(tf.keras.layers.Layer):
    """Self-attention followed by its output projection, with padding masking."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.self_attention = TFFastSpeechSelfAttention(config, name="self")
        self.dense_output = TFFastSpeechSelfOutput(config, name="output")

    def call(self, inputs, training=False):
        """Run attention and zero out padded time steps afterwards."""
        features, mask = inputs
        attn_results = self.self_attention([features, mask], training=training)
        projected = self.dense_output(
            [attn_results[0], features], training=training
        )
        # Zero padded positions so they never leak into later layers.
        mask_3d = tf.cast(tf.expand_dims(mask, 2), dtype=projected.dtype)
        masked_output = projected * mask_3d
        # Keep optional attention probabilities from the submodule.
        return (masked_output,) + attn_results[1:]
class TFFastSpeechIntermediate(tf.keras.layers.Layer):
    """Position-wise feed-forward block built from two 1-D convolutions."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv1d_1 = tf.keras.layers.Conv1D(
            config.intermediate_size,
            kernel_size=config.intermediate_kernel_size,
            kernel_initializer=get_initializer(config.initializer_range),
            padding="same",
            name="conv1d_1",
        )
        self.conv1d_2 = tf.keras.layers.Conv1D(
            config.hidden_size,
            kernel_size=config.intermediate_kernel_size,
            kernel_initializer=get_initializer(config.initializer_range),
            padding="same",
            name="conv1d_2",
        )
        # Accept either a registered activation name or a callable.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def call(self, inputs):
        """Apply conv -> activation -> conv, then re-mask padded steps."""
        features, mask = inputs
        features = self.conv1d_1(features)
        features = self.intermediate_act_fn(features)
        features = self.conv1d_2(features)
        mask_3d = tf.cast(tf.expand_dims(mask, 2), dtype=features.dtype)
        return features * mask_3d
class TFFastSpeechOutput(tf.keras.layers.Layer):
    """Residual add + layer-norm applied after the feed-forward block."""

    def __init__(self, config, **kwargs):
        """Create the layer-norm and dropout sublayers from ``config``."""
        super().__init__(**kwargs)
        self.LayerNorm = tf.keras.layers.LayerNormalization(
            epsilon=config.layer_norm_eps, name="LayerNorm"
        )
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def call(self, inputs, training=False):
        """Apply dropout, then layer-norm over the residual sum."""
        ff_output, residual = inputs
        dropped = self.dropout(ff_output, training=training)
        return self.LayerNorm(dropped + residual)
class TFFastSpeechLayer(tf.keras.layers.Layer):
    """One FFT block (self-attention + conv feed-forward) from FastSpeech."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.attention = TFFastSpeechAttention(config, name="attention")
        self.intermediate = TFFastSpeechIntermediate(config, name="intermediate")
        self.bert_output = TFFastSpeechOutput(config, name="output")

    def call(self, inputs, training=False):
        """Run attention then feed-forward, masking padded positions."""
        features, mask = inputs
        attn_results = self.attention([features, mask], training=training)
        attn_features = attn_results[0]
        ff_features = self.intermediate([attn_features, mask], training=training)
        block_output = self.bert_output(
            [ff_features, attn_features], training=training
        )
        # Zero padded time steps on the way out of the block.
        mask_3d = tf.cast(tf.expand_dims(mask, 2), dtype=block_output.dtype)
        # Keep optional attention probabilities from the attention submodule.
        return (block_output * mask_3d,) + attn_results[1:]
class TFFastSpeechEncoder(tf.keras.layers.Layer):
    """Stack of FFT blocks shared by the FastSpeech encoder."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = [
            TFFastSpeechLayer(config, name="layer_._{}".format(i))
            for i in range(config.num_hidden_layers)
        ]

    def call(self, inputs, training=False):
        """Run every FFT block; optionally collect states/attentions."""
        hidden_states, attention_mask = inputs
        all_hidden_states = ()
        all_attentions = ()
        for layer_module in self.layer:
            if self.output_hidden_states:
                all_hidden_states += (hidden_states,)
            layer_outputs = layer_module(
                [hidden_states, attention_mask], training=training
            )
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions += (layer_outputs[1],)
        if self.output_hidden_states:
            # Also record the final layer's hidden states.
            all_hidden_states += (hidden_states,)
        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs += (all_hidden_states,)
        if self.output_attentions:
            outputs += (all_attentions,)
        return outputs  # last hidden states, (all hidden states), (attentions)
class TFFastSpeechDecoder(TFFastSpeechEncoder):
    """Fast Speech decoder module."""
    def __init__(self, config, **kwargs):
        # Pop the custom kwarg *before* super().__init__ so the Keras base
        # class never sees it.
        self.is_compatible_encoder = kwargs.pop("is_compatible_encoder", True)
        super().__init__(config, **kwargs)
        self.config = config
        # create decoder positional embedding (frozen sin/cos; row 0 = pad)
        self.decoder_positional_embeddings = TFEmbedding(
            config.max_position_embeddings + 1,
            config.hidden_size,
            weights=[self._sincos_embedding()],
            name="position_embeddings",
            trainable=False,
        )
        # When encoder and decoder hidden sizes differ, project encoder
        # features into the decoder width first.
        if self.is_compatible_encoder is False:
            self.project_compatible_decoder = tf.keras.layers.Dense(
                units=config.hidden_size, name="project_compatible_decoder"
            )
        if config.n_speakers > 1:
            self.decoder_speaker_embeddings = TFEmbedding(
                config.n_speakers,
                config.hidden_size,
                embeddings_initializer=get_initializer(config.initializer_range),
                name="speaker_embeddings",
            )
            self.speaker_fc = tf.keras.layers.Dense(
                units=config.hidden_size, name="speaker_fc"
            )
    def call(self, inputs, training=False):
        """Add positional/speaker conditioning, then run the FFT stack.
        Args:
            inputs: [hidden_states, speaker_ids, encoder_mask, decoder_pos].
        """
        hidden_states, speaker_ids, encoder_mask, decoder_pos = inputs
        if self.is_compatible_encoder is False:
            hidden_states = self.project_compatible_decoder(hidden_states)
        # calculate new hidden states.
        hidden_states += tf.cast(
            self.decoder_positional_embeddings(decoder_pos), hidden_states.dtype
        )
        if self.config.n_speakers > 1:
            speaker_embeddings = self.decoder_speaker_embeddings(speaker_ids)
            speaker_features = tf.math.softplus(self.speaker_fc(speaker_embeddings))
            # extended speaker embeddings: broadcast across the time axis
            extended_speaker_features = speaker_features[:, tf.newaxis, :]
            hidden_states += extended_speaker_features
        return super().call([hidden_states, encoder_mask], training=training)
    def _sincos_embedding(self):
        """Transformer sin/cos table with a zeroed pad row at index 0."""
        position_enc = np.array(
            [
                [
                    pos / np.power(10000, 2.0 * (i // 2) / self.config.hidden_size)
                    for i in range(self.config.hidden_size)
                ]
                for pos in range(self.config.max_position_embeddings + 1)
            ]
        )
        position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])
        position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])
        # pad embedding.
        position_enc[0] = 0.0
        return position_enc
class TFTacotronPostnet(tf.keras.layers.Layer):
    """Tacotron-2 style postnet: conv/batch-norm stack that refines mels."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        self.conv_batch_norm = []
        for i in range(config.n_conv_postnet):
            is_last = i == config.n_conv_postnet - 1
            conv = tf.keras.layers.Conv1D(
                filters=config.num_mels if is_last else config.postnet_conv_filters,
                kernel_size=config.postnet_conv_kernel_sizes,
                padding="same",
                name="conv_._{}".format(i),
            )
            batch_norm = tf.keras.layers.BatchNormalization(
                axis=-1, name="batch_norm_._{}".format(i)
            )
            self.conv_batch_norm.append((conv, batch_norm))
        self.dropout = tf.keras.layers.Dropout(
            rate=config.postnet_dropout_rate, name="dropout"
        )
        # tanh on every layer except the (linear) final one.
        self.activation = [tf.nn.tanh] * (config.n_conv_postnet - 1) + [tf.identity]

    def call(self, inputs, training=False):
        """Refine mel features; padded frames are zeroed on output."""
        features, mask = inputs
        mask_3d = tf.cast(tf.expand_dims(mask, axis=2), features.dtype)
        for act, (conv, bn) in zip(self.activation, self.conv_batch_norm):
            # conv -> batch-norm -> activation -> dropout, as in Tacotron-2.
            features = self.dropout(act(bn(conv(features))), training=training)
        return features * mask_3d
class TFFastSpeechDurationPredictor(tf.keras.layers.Layer):
    """Predicts one non-negative duration value per encoder position."""

    def __init__(self, config, **kwargs):
        """Init variables."""
        super().__init__(**kwargs)
        # Each repetition: conv -> layer-norm -> relu6 -> dropout.
        # Layer `name=` strings must stay stable for checkpoint loading.
        self.conv_layers = []
        for i in range(config.num_duration_conv_layers):
            self.conv_layers.extend(
                [
                    tf.keras.layers.Conv1D(
                        config.duration_predictor_filters,
                        config.duration_predictor_kernel_sizes,
                        padding="same",
                        name="conv_._{}".format(i),
                    ),
                    tf.keras.layers.LayerNormalization(
                        epsilon=config.layer_norm_eps, name="LayerNorm_._{}".format(i)
                    ),
                    tf.keras.layers.Activation(tf.nn.relu6),
                    tf.keras.layers.Dropout(config.duration_predictor_dropout_probs),
                ]
            )
        self.conv_layers_sequence = tf.keras.Sequential(self.conv_layers)
        self.output_layer = tf.keras.layers.Dense(1)

    def call(self, inputs, training=False):
        """Predict a non-negative duration per input position."""
        features, mask = inputs
        mask = tf.cast(tf.expand_dims(mask, 2), features.dtype)
        # Zero padded positions before the conv stack sees them.
        predictions = self.conv_layers_sequence(features * mask)
        predictions = self.output_layer(predictions) * mask
        # relu6 keeps the output positive (and bounded); drop the channel dim.
        return tf.squeeze(tf.nn.relu6(predictions), -1)
class TFFastSpeechLengthRegulator(tf.keras.layers.Layer):
    """FastSpeech lengthregulator module."""
    def __init__(self, config, **kwargs):
        """Init variables."""
        # TFLite mode assumes batch_size == 1 and avoids tf.while_loop.
        self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
        super().__init__(**kwargs)
        self.config = config
    def call(self, inputs, training=False):
        """Call logic.
        Args:
            1. encoder_hidden_states, Tensor (float32) shape [batch_size, length, hidden_size]
            2. durations_gt, Tensor (float32/int32) shape [batch_size, length]
        Returns:
            outputs: repeated frames, [batch_size, max_total_duration, hidden_size].
            encoder_masks: int32 0/1 mask, [batch_size, max_total_duration].
        """
        encoder_hidden_states, durations_gt = inputs
        outputs, encoder_masks = self._length_regulator(
            encoder_hidden_states, durations_gt
        )
        return outputs, encoder_masks
    def _length_regulator(self, encoder_hidden_states, durations_gt):
        """Length regulator logic: repeat each frame by its duration."""
        sum_durations = tf.reduce_sum(durations_gt, axis=-1)  # [batch_size]
        max_durations = tf.reduce_max(sum_durations)
        input_shape = tf.shape(encoder_hidden_states)
        batch_size = input_shape[0]
        hidden_size = input_shape[-1]
        # initialize output hidden states and encoder masking.
        if self.enable_tflite_convertible:
            # There is only 1 batch in inference, so we don't have to use
            # `tf.While` op with 3-D output tensor.
            repeats = durations_gt[0]
            real_length = tf.reduce_sum(repeats)
            pad_size = max_durations - real_length
            # masks : [max_durations]
            masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
            repeat_encoder_hidden_states = tf.repeat(
                encoder_hidden_states[0], repeats=repeats, axis=0
            )
            repeat_encoder_hidden_states = tf.expand_dims(
                tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
            )  # [1, max_durations, hidden_size]
            outputs = repeat_encoder_hidden_states
            encoder_masks = masks
        else:
            # Graph-mode path: accumulate one padded sample per loop step.
            outputs = tf.zeros(
                shape=[0, max_durations, hidden_size], dtype=encoder_hidden_states.dtype
            )
            encoder_masks = tf.zeros(shape=[0, max_durations], dtype=tf.int32)
            def condition(
                i,
                batch_size,
                outputs,
                encoder_masks,
                encoder_hidden_states,
                durations_gt,
                max_durations,
            ):
                return tf.less(i, batch_size)
            def body(
                i,
                batch_size,
                outputs,
                encoder_masks,
                encoder_hidden_states,
                durations_gt,
                max_durations,
            ):
                # Repeat sample i's frames by their durations, right-pad to
                # max_durations, and append to the accumulators.
                repeats = durations_gt[i]
                real_length = tf.reduce_sum(repeats)
                pad_size = max_durations - real_length
                masks = tf.sequence_mask([real_length], max_durations, dtype=tf.int32)
                repeat_encoder_hidden_states = tf.repeat(
                    encoder_hidden_states[i], repeats=repeats, axis=0
                )
                repeat_encoder_hidden_states = tf.expand_dims(
                    tf.pad(repeat_encoder_hidden_states, [[0, pad_size], [0, 0]]), 0
                )  # [1, max_durations, hidden_size]
                outputs = tf.concat([outputs, repeat_encoder_hidden_states], axis=0)
                encoder_masks = tf.concat([encoder_masks, masks], axis=0)
                return [
                    i + 1,
                    batch_size,
                    outputs,
                    encoder_masks,
                    encoder_hidden_states,
                    durations_gt,
                    max_durations,
                ]
            # initialize iteration i.
            i = tf.constant(0, dtype=tf.int32)
            # shape_invariants let outputs/encoder_masks grow along the batch
            # dimension between loop iterations.
            _, _, outputs, encoder_masks, _, _, _, = tf.while_loop(
                condition,
                body,
                [
                    i,
                    batch_size,
                    outputs,
                    encoder_masks,
                    encoder_hidden_states,
                    durations_gt,
                    max_durations,
                ],
                shape_invariants=[
                    i.get_shape(),
                    batch_size.get_shape(),
                    tf.TensorShape(
                        [
                            None,
                            None,
                            self.config.encoder_self_attention_params.hidden_size,
                        ]
                    ),
                    tf.TensorShape([None, None]),
                    encoder_hidden_states.get_shape(),
                    durations_gt.get_shape(),
                    max_durations.get_shape(),
                ],
            )
        return outputs, encoder_masks
class TFFastSpeech(BaseModel):
    """TF Fastspeech module."""
    def __init__(self, config, **kwargs):
        """Init layers for fastspeech."""
        # Pop the custom kwarg before the Keras base class sees it.
        self.enable_tflite_convertible = kwargs.pop("enable_tflite_convertible", False)
        super().__init__(**kwargs)
        self.embeddings = TFFastSpeechEmbeddings(config, name="embeddings")
        self.encoder = TFFastSpeechEncoder(
            config.encoder_self_attention_params, name="encoder"
        )
        # dtype pinned to float32 on the output-side layers (presumably for
        # mixed-precision safety -- confirm).
        self.duration_predictor = TFFastSpeechDurationPredictor(
            config, dtype=tf.float32, name="duration_predictor"
        )
        self.length_regulator = TFFastSpeechLengthRegulator(
            config,
            enable_tflite_convertible=self.enable_tflite_convertible,
            name="length_regulator",
        )
        # Decoder projects encoder features itself when hidden sizes differ.
        self.decoder = TFFastSpeechDecoder(
            config.decoder_self_attention_params,
            is_compatible_encoder=config.encoder_self_attention_params.hidden_size
            == config.decoder_self_attention_params.hidden_size,
            name="decoder",
        )
        self.mel_dense = tf.keras.layers.Dense(
            units=config.num_mels, dtype=tf.float32, name="mel_before"
        )
        self.postnet = TFTacotronPostnet(
            config=config, dtype=tf.float32, name="postnet"
        )
        self.setup_inference_fn()
    def _build(self):
        """Dummy input for building model."""
        # fake inputs
        input_ids = tf.convert_to_tensor([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]], tf.int32)
        speaker_ids = tf.convert_to_tensor([0], tf.int32)
        duration_gts = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], tf.int32)
        self(input_ids, speaker_ids, duration_gts)
    def resize_positional_embeddings(self, new_size):
        """Rebuild the encoder position table for a new max length."""
        self.embeddings.resize_positional_embeddings(new_size)
        self._build()
    def call(
        self, input_ids, speaker_ids, duration_gts, training=False, **kwargs,
    ):
        """Training-path call logic (uses ground-truth durations).
        Returns:
            (mel_before, mel_after, duration_outputs).
        """
        # Token id 0 marks padding.
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=training)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=training
        )
        last_encoder_hidden_states = encoder_output[0]
        # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
        # rather than just use last_hidden_states of encoder for duration_predictor.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_gts], training=training
        )
        # create decoder positional embedding (positions start at 1; 0 = pad)
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=training,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        # postnet predicts a residual refinement added on top of mel_before
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=training) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs)
        return outputs
    def _inference(self, input_ids, speaker_ids, speed_ratios, **kwargs):
        """Inference-path call logic (uses predicted durations)."""
        attention_mask = tf.math.not_equal(input_ids, 0)
        embedding_output = self.embeddings([input_ids, speaker_ids], training=False)
        encoder_output = self.encoder(
            [embedding_output, attention_mask], training=False
        )
        last_encoder_hidden_states = encoder_output[0]
        # duration predictor, here use last_encoder_hidden_states, u can use more hidden_states layers
        # rather than just use last_hidden_states of encoder for duration_predictor.
        duration_outputs = self.duration_predictor(
            [last_encoder_hidden_states, attention_mask]
        )  # [batch_size, length]
        # Invert the predictor output with exp(x) - 1 (assumes training
        # targets were log(1 + duration) -- confirm against the trainer).
        duration_outputs = tf.math.exp(duration_outputs) - 1.0
        # NOTE(review): speed_ratios can never be None when called through the
        # tf.function signatures below; this branch only matters for direct
        # eager calls.
        if speed_ratios is None:
            speed_ratios = tf.convert_to_tensor(np.array([1.0]), dtype=tf.float32)
        speed_ratios = tf.expand_dims(speed_ratios, 1)
        duration_outputs = tf.cast(
            tf.math.round(duration_outputs * speed_ratios), tf.int32
        )
        length_regulator_outputs, encoder_masks = self.length_regulator(
            [last_encoder_hidden_states, duration_outputs], training=False
        )
        # create decoder positional embedding
        decoder_pos = tf.range(
            1, tf.shape(length_regulator_outputs)[1] + 1, dtype=tf.int32
        )
        masked_decoder_pos = tf.expand_dims(decoder_pos, 0) * encoder_masks
        decoder_output = self.decoder(
            [length_regulator_outputs, speaker_ids, encoder_masks, masked_decoder_pos],
            training=False,
        )
        last_decoder_hidden_states = decoder_output[0]
        # here u can use sum or concat more than 1 hidden states layers from decoder.
        mel_before = self.mel_dense(last_decoder_hidden_states)
        mel_after = (
            self.postnet([mel_before, encoder_masks], training=False) + mel_before
        )
        outputs = (mel_before, mel_after, duration_outputs)
        return outputs
    def setup_inference_fn(self):
        """Wrap ``_inference`` as tf.functions: generic batch and TFLite batch-1."""
        self.inference = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[None, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[None,], dtype=tf.float32, name="speed_ratios"),
            ],
        )
        self.inference_tflite = tf.function(
            self._inference,
            experimental_relax_shapes=True,
            input_signature=[
                tf.TensorSpec(shape=[1, None], dtype=tf.int32, name="input_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.int32, name="speaker_ids"),
                tf.TensorSpec(shape=[1,], dtype=tf.float32, name="speed_ratios"),
            ],
        )
| 33,971 | 36.372937 | 102 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/synpaflex.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for SynPaFlex dataset."""
import os
import re
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils import cleaners
# Symbol inventory for French SynPaFlex text (accented letters included).
_pad = "pad"
_eos = "eos"
_punctuation = "!/\'(),-.:;? "
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzéèàùâêîôûçäëïöüÿœæ"
# Export all symbols: pad takes id 0 and eos takes the last id.
SYNPAFLEX_SYMBOLS = (
    [_pad] + list(_punctuation) + list(_letters) + [_eos]
)
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class SynpaflexProcessor(BaseProcessor):
    """SynPaFlex processor."""
    cleaner_names: str = "basic_cleaners"
    # Column layout of each '|'-separated line in the metadata file.
    positions = {
        "wave_file": 0,
        "text": 1,
        "text_norm": 2
    }
    train_f_name: str = "synpaflex.txt"
    def create_items(self):
        """Read the metadata file and populate ``self.items``."""
        if self.data_dir:
            with open(
                os.path.join(self.data_dir, self.train_f_name), encoding="utf-8"
            ) as f:
                self.items = [self.split_line(self.data_dir, line, "|") for line in f]
    def split_line(self, data_dir, line, split):
        """Parse one metadata line into (text, wav_path, speaker_name)."""
        parts = line.strip().split(split)
        wave_file = parts[self.positions["wave_file"]]
        text = parts[self.positions["text"]]
        wav_path = os.path.join(data_dir, "wavs", f"{wave_file}.wav")
        # Single-speaker corpus: the speaker name is fixed.
        speaker_name = "synpaflex"
        return text, wav_path, speaker_name
    def setup_eos_token(self):
        """Return the end-of-sentence token appended to every sequence."""
        return _eos
    def get_one_sample(self, item):
        """Load one (text, wav) pair and convert it to model-ready arrays."""
        text, wav_path, speaker_name = item
        # normalize audio signal to be [-1, 1], soundfile already norm.
        audio, rate = sf.read(wav_path)
        audio = audio.astype(np.float32)
        # convert text to ids
        text_ids = np.asarray(self.text_to_sequence(text), np.int32)
        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": os.path.split(wav_path)[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }
        return sample
    def text_to_sequence(self, text):
        """Convert text to a list of symbol ids, appending the eos id."""
        sequence = []
        # Check for curly braces and treat their contents as ARPAbet:
        while len(text):
            m = _curly_re.match(text)
            if not m:
                sequence += self._symbols_to_sequence(
                    self._clean_text(text, [self.cleaner_names])
                )
                break
            sequence += self._symbols_to_sequence(
                self._clean_text(m.group(1), [self.cleaner_names])
            )
            sequence += self._arpabet_to_sequence(m.group(2))
            text = m.group(3)
        # add eos tokens
        sequence += [self.eos_id]
        return sequence
    def _clean_text(self, text, cleaner_names):
        """Apply the named cleaner functions from ``tensorflow_tts.utils.cleaners``."""
        for name in cleaner_names:
            cleaner = getattr(cleaners, name)
            if not cleaner:
                raise Exception("Unknown cleaner: %s" % name)
            text = cleaner(text)
        return text
    def _symbols_to_sequence(self, symbols):
        """Map symbols to ids, silently dropping unknown symbols."""
        return [self.symbol_to_id[s] for s in symbols if self._should_keep_symbol(s)]
    def _sequence_to_symbols(self, sequence):
        """Inverse of ``_symbols_to_sequence``."""
        return [self.id_to_symbol[s] for s in sequence]
    def _arpabet_to_sequence(self, text):
        """Map space-separated ARPAbet symbols (prefixed with '@') to ids."""
        return self._symbols_to_sequence(["@" + s for s in text.split()])
    def _should_keep_symbol(self, s):
        """Keep only known symbols, excluding the '_' and '~' markers."""
        return s in self.symbol_to_id and s != "_" and s != "~"
    def save_pretrained(self, saved_path):
        """Persist the symbol mapper alongside a saved model."""
        os.makedirs(saved_path, exist_ok=True)
        # NOTE(review): PROCESSOR_FILE_NAME is neither defined nor imported in
        # this file as shown -- this line raises NameError unless it is
        # provided elsewhere; confirm the missing import.
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})
| 4,341 | 31.402985 | 86 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/ljspeechu.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for LJSpeech Ultimate dataset."""
import os
import re
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils import cleaners
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
from g2p_en import G2p as grapheme_to_phn
valid_symbols = [
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
]
_pad = "pad"
_eos = "eos"
_punctuation = "!'(),.:;?" # Unlike LJSpeech, we do not use spaces since we are phoneme only and spaces lead to very bad attention performance with phonetic input.
_special = "-"
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ["@" + s for s in valid_symbols]
# Export all symbols:
LJSPEECH_U_SYMBOLS = [_pad] + list(_special) + list(_punctuation) + _arpabet + [_eos]
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
_arpa_exempt = _punctuation + _special
arpa_g2p = grapheme_to_phn()
@dataclass
class LJSpeechUltimateProcessor(BaseProcessor):
    """LJSpeech Ultimate processor.

    Phoneme-only variant of the LJSpeech processor: plain-text rows are
    converted to an ARPA string via g2p_en (`to_arpa`) before being mapped
    to symbol ids; rows already containing "{...}" spans are used as-is.
    """

    cleaner_names: str = "english_cleaners"
    positions = {
        "wave_file": 0,
        "text_norm": 1,
    }
    train_f_name: str = "filelist.txt"

    def create_items(self):
        """Populate self.items from filelist.txt ("wave_file|text_norm" rows)."""
        if self.data_dir:
            with open(
                os.path.join(self.data_dir, self.train_f_name), encoding="utf-8"
            ) as f:
                self.items = [self.split_line(self.data_dir, line, "|") for line in f]

    def split_line(self, data_dir, line, split):
        """Split one row into (text_norm, wav_path, speaker_name)."""
        parts = line.strip().split(split)
        wave_file = parts[self.positions["wave_file"]]
        text_norm = parts[self.positions["text_norm"]]
        # NOTE(review): no "wavs/" prefix or ".wav" suffix is added here, so the
        # wave_file column presumably holds a path relative to data_dir —
        # confirm against the actual filelist.txt format.
        wav_path = os.path.join(data_dir, wave_file)
        speaker_name = "ljspeech"
        return text_norm, wav_path, speaker_name

    def setup_eos_token(self):
        """Symbol appended at the end of every id sequence."""
        return _eos

    def save_pretrained(self, saved_path):
        """Write the symbol/speaker mappers into `saved_path`."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def to_arpa(self, in_str):
        """Convert raw text to an ARPA string, e.g. "{ HH AH0 L OW1 } !".

        Phoneme runs are wrapped in curly braces; punctuation/special
        characters (_arpa_exempt) are emitted outside the braces.
        """
        phn_arr = arpa_g2p(in_str)
        phn_arr = [x for x in phn_arr if x != " "]
        arpa_str = "{"
        in_chain = True
        # Iterative array-traverse approach to build ARPA string. Phonemes must be in curly braces, but not punctuation
        for token in phn_arr:
            if token in _arpa_exempt and in_chain:
                # Close the current phoneme chain before emitting punctuation.
                arpa_str += " }"
                in_chain = False
            if token not in _arpa_exempt and not in_chain:
                # Re-open a phoneme chain after punctuation.
                arpa_str += " {"
                in_chain = True
            arpa_str += " " + token
        if in_chain:
            # Close a chain left open at the end of the token stream.
            arpa_str += " }"
        return arpa_str

    def get_one_sample(self, item):
        """Load audio and convert text for a single dataset item."""
        text, wav_path, speaker_name = item
        # Check if this line is already an ARPA string by searching for the
        # trademark curly brace. If not, we apply g2p conversion first.
        if not "{" in text:
            text = self.to_arpa(text)

        # normalize audio signal to be [-1, 1], soundfile already norm.
        audio, rate = sf.read(wav_path)
        audio = audio.astype(np.float32)

        # convert text to ids
        text_ids = np.asarray(self.text_to_sequence(text), np.int32)

        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": os.path.split(wav_path)[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }
        return sample

    def text_to_sequence(self, text):
        """Convert text (with {ARPAbet} spans) into symbol ids plus eos."""
        sequence = []
        # Check for curly braces and treat their contents as ARPAbet:
        while len(text):
            m = _curly_re.match(text)
            if not m:
                sequence += self._symbols_to_sequence(
                    self._clean_text(text, [self.cleaner_names])
                )
                break
            sequence += self._symbols_to_sequence(
                self._clean_text(m.group(1), [self.cleaner_names])
            )
            sequence += self._arpabet_to_sequence(m.group(2))
            text = m.group(3)

        # add eos tokens
        sequence += [self.eos_id]
        return sequence

    def _clean_text(self, text, cleaner_names):
        """Run each named cleaner from tensorflow_tts.utils.cleaners over the text."""
        for name in cleaner_names:
            cleaner = getattr(cleaners, name)
            if not cleaner:
                raise Exception("Unknown cleaner: %s" % name)
            text = cleaner(text)
        return text

    def _symbols_to_sequence(self, symbols):
        """Ids for every known, kept symbol."""
        return [self.symbol_to_id[s] for s in symbols if self._should_keep_symbol(s)]

    def _arpabet_to_sequence(self, text):
        """Whitespace-separated ARPAbet tokens -> ids (via the "@" prefix)."""
        return self._symbols_to_sequence(["@" + s for s in text.split()])

    def _should_keep_symbol(self, s):
        return s in self.symbol_to_id and s != "_" and s != "~"
| 6,368 | 24.173913 | 164 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/base_processor.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Processor for all processor."""
import abc
import json
import os
from typing import Dict, List, Union
from dataclasses import dataclass, field
class DataProcessorError(Exception):
    """Raised when a processor is misconfigured (e.g. the symbols list is
    empty while no mapper file has been loaded)."""

    pass
@dataclass
class BaseProcessor(abc.ABC):
    """Abstract base class for dataset processors.

    A processor reads a dataset's transcript file, builds the symbol <-> id
    and speaker-name <-> id mappings, and converts each dataset entry into a
    training sample. Subclasses implement `get_one_sample`,
    `text_to_sequence`, `setup_eos_token` and `save_pretrained`.
    """

    data_dir: str
    symbols: List[str] = field(default_factory=list)
    speakers_map: Dict[str, int] = field(default_factory=dict)
    train_f_name: str = "train.txt"
    delimiter: str = "|"
    positions = {
        "file": 0,
        "text": 1,
        "speaker_name": 2,
    }  # positions of file,text,speaker_name after split line
    f_extension: str = ".wav"
    saved_mapper_path: str = None
    loaded_mapper_path: str = None
    # extras
    items: List[List[str]] = field(default_factory=list)  # text, wav_path, speaker_name
    symbol_to_id: Dict[str, int] = field(default_factory=dict)
    id_to_symbol: Dict[int, str] = field(default_factory=dict)

    def __post_init__(self):
        if self.loaded_mapper_path is not None:
            # Mappers come from a saved file; skip dataset parsing entirely.
            self._load_mapper(loaded_path=self.loaded_mapper_path)
            # Fix: keep the id -> name speaker lookup available in this path
            # too (the original only built it in the parse-from-scratch path).
            self.reverse_speaker = {v: k for k, v in self.speakers_map.items()}
            self._setup_eos()
            return

        if not self.symbols:
            raise DataProcessorError("Symbols list is empty but mapper isn't loaded")

        self.create_items()
        self.create_speaker_map()
        self.reverse_speaker = {v: k for k, v in self.speakers_map.items()}
        self.create_symbols()
        if self.saved_mapper_path is not None:
            self._save_mapper(saved_path=self.saved_mapper_path)

        # processor name. usefull to use it for AutoProcessor
        self._processor_name = type(self).__name__

        self._setup_eos()

    def _setup_eos(self):
        """Register the subclass's eos token (if any) and cache its id."""
        eos = self.setup_eos_token()
        if eos:
            self.add_symbol(eos)  # no-op if this eos token is already present
            self.eos_id = self.symbol_to_id[eos]

    def __getattr__(self, name: str) -> Union[str, int]:
        """Resolve unknown attributes as symbol lookups.

        `processor.<symbol>_id` returns the id of `<symbol>`; a bare
        `processor.<symbol>` access also resolves through the symbol map.
        NOTE(review): a genuinely missing attribute therefore raises
        KeyError rather than AttributeError — surprising, but preserved
        for backward compatibility.
        """
        if "_id" in name:  # map symbol to id
            return self.symbol_to_id[name.replace("_id", "")]
        return self.symbol_to_id[name]  # map symbol to value

    def create_speaker_map(self):
        """Assign a dense integer id to every distinct speaker, in item order."""
        sp_id = 0
        for i in self.items:
            speaker_name = i[-1]  # speaker name is the last element of an item
            if speaker_name not in self.speakers_map:
                self.speakers_map[speaker_name] = sp_id
                sp_id += 1

    def get_speaker_id(self, name: str) -> int:
        """Return the numeric id for a speaker name."""
        return self.speakers_map[name]

    def get_speaker_name(self, speaker_id: int) -> str:
        """Return the speaker name for a numeric speaker id.

        Bug fix: the original indexed `speakers_map` (name -> id) with an
        int id, which always raised KeyError; invert the mapping instead.
        An unknown id still raises KeyError, as before.
        """
        return {v: k for k, v in self.speakers_map.items()}[speaker_id]

    def create_symbols(self):
        """Build the symbol <-> id maps from the ordered symbols list."""
        self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
        self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}

    def create_items(self):
        """
        Method used to create items from training file
        items struct example => text, wav_file_path, speaker_name.
        Note that the speaker_name should be a last.
        """
        with open(
            os.path.join(self.data_dir, self.train_f_name), mode="r", encoding="utf-8"
        ) as f:
            for line in f:
                parts = line.strip().split(self.delimiter)
                wav_path = os.path.join(self.data_dir, parts[self.positions["file"]])
                # Append the audio extension only if the path lacks it.
                wav_path = (
                    wav_path + self.f_extension
                    if wav_path[-len(self.f_extension) :] != self.f_extension
                    else wav_path
                )
                text = parts[self.positions["text"]]
                speaker_name = parts[self.positions["speaker_name"]]
                self.items.append([text, wav_path, speaker_name])

    def add_symbol(self, symbol: Union[str, list]):
        """Append a new symbol (or list of symbols) to all symbol maps.

        Silently ignores symbols that are already registered.
        """
        if isinstance(symbol, str):
            if symbol in self.symbol_to_id:
                return
            self.symbols.append(symbol)
            symbol_id = len(self.symbol_to_id)
            self.symbol_to_id[symbol] = symbol_id
            self.id_to_symbol[symbol_id] = symbol
        elif isinstance(symbol, list):
            for i in symbol:
                self.add_symbol(i)
        else:
            raise ValueError("A new_symbols must be a string or list of string.")

    @abc.abstractmethod
    def get_one_sample(self, item):
        """Get one sample from dataset items.
        Args:
            item: one item in Dataset items.
                Dataset items may include (raw_text, speaker_id, wav_path, ...)

        Returns:
            sample (dict): sample dictionary return all feature used for preprocessing later.
        """
        sample = {
            "raw_text": None,
            "text_ids": None,
            "audio": None,
            "utt_id": None,
            "speaker_name": None,
            "rate": None,
        }
        return sample

    @abc.abstractmethod
    def text_to_sequence(self, text: str):
        """Convert input text to a sequence of symbol ids."""
        return []

    @abc.abstractmethod
    def setup_eos_token(self):
        """Return eos symbol of type string."""
        return "eos"

    def convert_symbols_to_ids(self, symbols: Union[str, list]):
        """Map a symbol or a list of symbols to their ids.

        Bug fix: the original referenced `self._symbol_to_id`, an attribute
        that does not exist (the field is `symbol_to_id`); the `__getattr__`
        fallback then raised KeyError for every call.
        """
        if isinstance(symbols, str):
            return [self.symbol_to_id[symbols]]
        if isinstance(symbols, list):
            sequence = []
            for s in symbols:
                if not isinstance(s, str):
                    raise ValueError("All elements of symbols must be a string.")
                sequence.append(self.symbol_to_id[s])
            return sequence
        raise ValueError("A symbols must be a string or list of string.")

    def _load_mapper(self, loaded_path: str = None):
        """
        Load all needed mappers from file
        """
        loaded_path = (
            os.path.join(self.data_dir, "mapper.json")
            if loaded_path is None
            else loaded_path
        )
        with open(loaded_path, "r") as f:
            data = json.load(f)
        self.speakers_map = data["speakers_map"]
        self.symbol_to_id = data["symbol_to_id"]
        # JSON keys are strings; ids must be ints.
        self.id_to_symbol = {int(k): v for k, v in data["id_to_symbol"].items()}
        self._processor_name = data["processor_name"]

        # other keys: restore any extra attributes the mapper file carries.
        all_data_keys = data.keys()
        for key in all_data_keys:
            if key not in ["speakers_map", "symbol_to_id", "id_to_symbol"]:
                setattr(self, key, data[key])

    def _save_mapper(self, saved_path: str = None, extra_attrs_to_save: dict = None):
        """
        Save all needed mappers to file
        """
        saved_path = (
            os.path.join(self.data_dir, "mapper.json")
            if saved_path is None
            else saved_path
        )
        with open(saved_path, "w") as f:
            full_mapper = {
                "symbol_to_id": self.symbol_to_id,
                "id_to_symbol": self.id_to_symbol,
                "speakers_map": self.speakers_map,
                "processor_name": self._processor_name,
            }
            if extra_attrs_to_save:
                full_mapper = {**full_mapper, **extra_attrs_to_save}
            json.dump(full_mapper, f)

    @abc.abstractmethod
    def save_pretrained(self, saved_path):
        """Save mappers to file"""
        pass
| 8,120 | 34.00431 | 93 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/jsut.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for JSUT dataset."""
import os
import re
import numpy as np
import soundfile as sf
import pyopenjtalk
import yaml
import librosa
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
# from tensorflow_tts.utils import cleaners
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
valid_symbols = [
'N',
'a',
'b',
'by',
'ch',
'cl',
'd',
'dy',
'e',
'f',
'g',
'gy',
'h',
'hy',
'i',
'j',
'k',
'ky',
'm',
'my',
'n',
'ny',
'o',
'p',
'pau',
'py',
'r',
'ry',
's',
'sh',
't',
'ts',
'u',
'v',
'w',
'y',
'z'
]
_pad = "pad"
_eos = "eos"
_sil = "sil"  # silence symbol; create_items wraps every utterance with it
# _punctuation = "!'(),.:;? "
# _special = "-"
# _letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
# _arpabet = ["@" + s for s in valid_symbols]

# Export all symbols: "pad" first (id 0), then silence, phonemes, "eos" last.
JSUT_SYMBOLS = (
    [_pad] + [_sil] + valid_symbols + [_eos]
)

# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class JSUTProcessor(BaseProcessor):
    """Processor for the JSUT Japanese speech corpus.

    Reads the kana-level label yaml (its `phone_level3` field), wraps every
    utterance's phoneme sequence in silence symbols, and maps phonemes to
    symbol ids.
    """

    cleaner_names: str = None
    speaker_name: str = "jsut"
    train_f_name: str = "text_kana/basic5000.yaml"

    def create_items(self):
        """Build (phoneme_text, wav_path, utt_id, speaker_name) items."""
        collected = []
        if self.data_dir:
            label_path = os.path.join(self.data_dir, self.train_f_name)
            with open(label_path, encoding="utf-8") as f:
                data_json = yaml.load(f, Loader=yaml.FullLoader)

            for utt_id, entry in data_json.items():
                # Surround each utterance's phonemes with silence symbols.
                phones = [_sil] + entry["phone_level3"].split("-") + [_sil]
                wav_path = os.path.join(self.data_dir, "wav", f"{utt_id}.wav")
                collected.append(
                    [" ".join(phones), wav_path, utt_id, self.speaker_name]
                )
        self.items = collected

    def setup_eos_token(self):
        """End-of-sequence symbol appended by text_to_sequence."""
        return _eos

    def save_pretrained(self, saved_path):
        """Persist the symbol/speaker mappers under `saved_path`."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def get_one_sample(self, item):
        """Load audio and convert the phoneme text for one dataset item."""
        text, wav_path, utt_id, speaker_name = item

        # soundfile returns samples already normalized to [-1, 1].
        audio, rate = sf.read(wav_path)
        audio = audio.astype(np.float32)

        return {
            "raw_text": text,
            "text_ids": np.asarray(self.text_to_sequence(text), np.int32),
            "audio": audio,
            "utt_id": utt_id,
            "speaker_name": speaker_name,
            "rate": rate,
        }

    def text_to_sequence(self, text, inference=False):
        """Map space-separated phonemes to ids, ending with eos.

        At inference time, raw text is first phonemized with pyopenjtalk.
        """
        if inference:
            text = pyopenjtalk.g2p(text)
            # Fold "I"/"U" back to lowercase (presumably devoiced vowel
            # markers from pyopenjtalk — confirm against its output format).
            text = text.replace("I", "i").replace("U", "u")
            print(f"phoneme seq: {text}")
        sequence = [self.symbol_to_id[symbol] for symbol in text.split()]
        sequence.append(self.eos_id)
        return sequence
| 4,262 | 25.153374 | 95 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/ljspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for LJSpeech dataset."""
import os
import re
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils import cleaners
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
valid_symbols = [
"AA",
"AA0",
"AA1",
"AA2",
"AE",
"AE0",
"AE1",
"AE2",
"AH",
"AH0",
"AH1",
"AH2",
"AO",
"AO0",
"AO1",
"AO2",
"AW",
"AW0",
"AW1",
"AW2",
"AY",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH",
"EH0",
"EH1",
"EH2",
"ER",
"ER0",
"ER1",
"ER2",
"EY",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH",
"IH0",
"IH1",
"IH2",
"IY",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW",
"OW0",
"OW1",
"OW2",
"OY",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
]
_pad = "pad"
_eos = "eos"
_punctuation = "!'(),.:;? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ["@" + s for s in valid_symbols]

# Export all symbols: "pad" is placed first (id 0) and "eos" last.
LJSPEECH_SYMBOLS = (
    [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet + [_eos]
)

# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class LJSpeechProcessor(BaseProcessor):
    """Processor for the LJSpeech corpus.

    Parses metadata.csv ("wave_file|text|text_norm" rows) and converts the
    normalized text, with optional {ARPAbet} spans, into symbol ids.
    """

    cleaner_names: str = "english_cleaners"
    positions = {
        "wave_file": 0,
        "text": 1,
        "text_norm": 2,
    }
    train_f_name: str = "metadata.csv"

    def create_items(self):
        """Collect one (text_norm, wav_path, speaker_name) item per metadata row."""
        if not self.data_dir:
            return
        meta_path = os.path.join(self.data_dir, self.train_f_name)
        with open(meta_path, encoding="utf-8") as meta_file:
            self.items = [
                self.split_line(self.data_dir, row, "|") for row in meta_file
            ]

    def split_line(self, data_dir, line, split):
        """Split a metadata row; audio lives under data_dir/wavs/<wave_file>.wav."""
        fields = line.strip().split(split)
        wave_file = fields[self.positions["wave_file"]]
        normalized = fields[self.positions["text_norm"]]
        wav_path = os.path.join(data_dir, "wavs", f"{wave_file}.wav")
        return normalized, wav_path, "ljspeech"

    def setup_eos_token(self):
        """End-of-sequence symbol appended by text_to_sequence."""
        return _eos

    def save_pretrained(self, saved_path):
        """Persist the symbol/speaker mappers under `saved_path`."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def get_one_sample(self, item):
        """Load audio and convert text for one dataset item."""
        text, wav_path, speaker_name = item

        # soundfile returns samples already normalized to [-1, 1].
        audio, rate = sf.read(wav_path)

        return {
            "raw_text": text,
            "text_ids": np.asarray(self.text_to_sequence(text), np.int32),
            "audio": audio.astype(np.float32),
            "utt_id": os.path.split(wav_path)[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }

    def text_to_sequence(self, text):
        """Map text to symbol ids; {…} spans are treated as ARPAbet. Ends with eos."""
        ids = []
        remaining = text
        while remaining:
            match = _curly_re.match(remaining)
            if match is None:
                cleaned = self._clean_text(remaining, [self.cleaner_names])
                ids.extend(self._symbols_to_sequence(cleaned))
                break
            before, arpa, after = match.group(1), match.group(2), match.group(3)
            ids.extend(
                self._symbols_to_sequence(self._clean_text(before, [self.cleaner_names]))
            )
            ids.extend(self._arpabet_to_sequence(arpa))
            remaining = after
        ids.append(self.eos_id)
        return ids

    def _clean_text(self, text, cleaner_names):
        """Apply each named cleaner function in order."""
        for cleaner_name in cleaner_names:
            cleaner_fn = getattr(cleaners, cleaner_name)
            if not cleaner_fn:
                raise Exception("Unknown cleaner: %s" % cleaner_name)
            text = cleaner_fn(text)
        return text

    def _symbols_to_sequence(self, symbols):
        """Ids for every known, kept symbol."""
        return [self.symbol_to_id[s] for s in symbols if self._should_keep_symbol(s)]

    def _arpabet_to_sequence(self, text):
        """Whitespace-separated ARPAbet tokens -> ids (via the "@" prefix)."""
        return self._symbols_to_sequence(["@" + token for token in text.split()])

    def _should_keep_symbol(self, s):
        return s in self.symbol_to_id and s not in ("_", "~")
| 5,362 | 23.049327 | 95 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/libritts.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for LibriTTS dataset."""
import os
import re
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from g2p_en import g2p as grapheme_to_phonem
from tensorflow_tts.processor.base_processor import BaseProcessor
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
g2p = grapheme_to_phonem.G2p()

# NOTE: g2p.phonemes is the list owned by the g2p_en instance; the two
# appends below mutate it in place so "SIL"/"END" become valid symbols.
valid_symbols = g2p.phonemes
valid_symbols.append("SIL")
valid_symbols.append("END")

_punctuation = "!'(),.:;? "
# Prefix phonemes with "@" so they cannot collide with punctuation symbols.
_arpabet = ["@" + s for s in valid_symbols]

LIBRITTS_SYMBOLS = _arpabet + list(_punctuation)
@dataclass
class LibriTTSProcessor(BaseProcessor):
    """Processor for LibriTTS.

    Training transcripts are expected to be already phonemized; at
    inference time raw text is phonemized with g2p_en.
    """

    mode: str = "train"
    train_f_name: str = "train.txt"
    positions = {
        "file": 0,
        "text": 1,
        "speaker_name": 2,
    }  # positions of file,text,speaker_name after split line
    f_extension: str = ".wav"
    cleaner_names: str = None

    def create_items(self):
        """Read the transcript file and collect (text, wav_path, speaker) items."""
        train_path = os.path.join(self.data_dir, self.train_f_name)
        with open(train_path, mode="r", encoding="utf-8") as f:
            for row in f:
                fields = row.strip().split(self.delimiter)
                wav_path = os.path.join(self.data_dir, fields[self.positions["file"]])
                # Append the audio extension only if the path lacks it.
                if wav_path[-len(self.f_extension) :] != self.f_extension:
                    wav_path = wav_path + self.f_extension
                self.items.append(
                    [
                        fields[self.positions["text"]],
                        wav_path,
                        fields[self.positions["speaker_name"]],
                    ]
                )

    def get_one_sample(self, item):
        """Load audio and convert text for one dataset item."""
        text, wav_path, speaker_name = item
        audio, rate = sf.read(wav_path, dtype="float32")
        return {
            "raw_text": text,
            "text_ids": np.asarray(self.text_to_sequence(text), np.int32),
            "audio": audio,
            "utt_id": wav_path.split("/")[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }

    def setup_eos_token(self):
        # This processor does not append an eos token.
        return None  # because we do not use this

    def save_pretrained(self, saved_path):
        """Persist the symbol/speaker mappers under `saved_path`."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def text_to_sequence(self, text):
        """Map text to symbol ids, phonemizing first unless in train mode."""
        if self.mode == "train":
            # In train mode text should already be space-separated phonemes.
            return self.symbols_to_ids(self.clean_g2p(text.split(" ")))
        return self.inference_text_to_seq(text)

    def inference_text_to_seq(self, text: str):
        """Phonemize raw text with g2p_en, then map to symbol ids."""
        return self.symbols_to_ids(self.text_to_ph(text))

    def symbols_to_ids(self, symbols_list: list):
        """Ids for a list of "@"-prefixed phoneme/punctuation symbols."""
        return [self.symbol_to_id[symbol] for symbol in symbols_list]

    def text_to_ph(self, text: str):
        """Raw text -> cleaned "@"-prefixed phoneme list."""
        return self.clean_g2p(g2p(text))

    def clean_g2p(self, g2p_text: list):
        """Prefix tokens with "@", drop spaces, and terminate with @END
        when the final token is a space or "SIL"."""
        if not g2p_text:
            return []
        cleaned = ["@" + token for token in g2p_text[:-1] if token != " "]
        final = g2p_text[-1]
        if final != " " and final != "SIL":
            cleaned.append("@" + final)
        else:
            cleaned.append(
                "@END"
            )  # TODO try learning without end token and compare results
        return cleaned
| 4,067 | 31.544 | 86 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/kss.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for KSS dataset."""
import os
import re
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils import cleaners
from tensorflow_tts.utils.korean import symbols as KSS_SYMBOLS
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class KSSProcessor(BaseProcessor):
    """KSS (Korean Single Speaker) processor.

    Reads `transcript.v.1.4.txt` and yields (normalized_text, wav_path,
    speaker_name) items for the single "kss" speaker.
    """

    cleaner_names: str = "korean_cleaners"
    positions = {
        "wave_file": 0,
        "text_norm": 2,
    }
    train_f_name: str = "transcript.v.1.4.txt"

    def create_items(self):
        """Load every transcript row as one dataset item."""
        if not self.data_dir:
            return
        transcript = os.path.join(self.data_dir, self.train_f_name)
        with open(transcript, encoding="utf-8") as f:
            self.items = [self.split_line(self.data_dir, row, "|") for row in f]

    def split_line(self, data_dir, line, split):
        """Split a transcript row into (text_norm, wav_path, speaker_name)."""
        fields = line.strip().split(split)
        wav_path = os.path.join(data_dir, "kss", fields[self.positions["wave_file"]])
        return fields[self.positions["text_norm"]], wav_path, "kss"

    def setup_eos_token(self):
        """End-of-sequence symbol appended by text_to_sequence."""
        return "eos"

    def save_pretrained(self, saved_path):
        """Persist the symbol/speaker mappers under `saved_path`."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def get_one_sample(self, item):
        """Load audio and convert text for one dataset item."""
        text, wav_path, speaker_name = item

        # soundfile returns samples already normalized to [-1, 1].
        audio, rate = sf.read(wav_path)

        return {
            "raw_text": text,
            "text_ids": np.asarray(self.text_to_sequence(text), np.int32),
            "audio": audio.astype(np.float32),
            "utt_id": os.path.split(wav_path)[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }

    def text_to_sequence(self, text):
        """Map text to symbol ids; {…} spans are treated as ARPAbet. Ends with eos."""
        ids = []
        rest = text
        while rest:
            match = _curly_re.match(rest)
            if match is None:
                ids.extend(
                    self._symbols_to_sequence(
                        self._clean_text(rest, [self.cleaner_names])
                    )
                )
                break
            ids.extend(
                self._symbols_to_sequence(
                    self._clean_text(match.group(1), [self.cleaner_names])
                )
            )
            ids.extend(self._arpabet_to_sequence(match.group(2)))
            rest = match.group(3)
        ids.append(self.eos_id)
        return ids

    def _clean_text(self, text, cleaner_names):
        """Apply each named cleaner function in order."""
        for cleaner_name in cleaner_names:
            cleaner_fn = getattr(cleaners, cleaner_name)
            if not cleaner_fn:
                raise Exception("Unknown cleaner: %s" % cleaner_name)
            text = cleaner_fn(text)
        return text

    def _symbols_to_sequence(self, symbols):
        """Ids for every known, kept symbol."""
        return [self.symbol_to_id[s] for s in symbols if self._should_keep_symbol(s)]

    def _arpabet_to_sequence(self, text):
        """Whitespace-separated ARPAbet tokens -> ids (via the "@" prefix)."""
        return self._symbols_to_sequence("@" + token for token in text.split())

    def _should_keep_symbol(self, s):
        return s in self.symbol_to_id and s not in ("_", "~")
| 4,086 | 32.227642 | 86 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/__init__.py | from tensorflow_tts.processor.base_processor import BaseProcessor
from tensorflow_tts.processor.ljspeech import LJSpeechProcessor
from tensorflow_tts.processor.baker import BakerProcessor
from tensorflow_tts.processor.kss import KSSProcessor
from tensorflow_tts.processor.libritts import LibriTTSProcessor
from tensorflow_tts.processor.thorsten import ThorstenProcessor
from tensorflow_tts.processor.ljspeechu import LJSpeechUltimateProcessor
from tensorflow_tts.processor.synpaflex import SynpaflexProcessor
from tensorflow_tts.processor.jsut import JSUTProcessor
| 566 | 50.545455 | 72 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/baker.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for Baker dataset."""
import os
import re
from typing import Dict, List, Union, Tuple, Any
import librosa
import numpy as np
import soundfile as sf
from dataclasses import dataclass, field
from pypinyin import Style
from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
from pypinyin.converter import DefaultConverter
from pypinyin.core import Pinyin
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
# Symbol inventory for the Baker (Chinese) dataset.
_pad = ["pad"]
_eos = ["eos"]
# "sil" is silence; "#0"-"#3" are prosodic-break levels from the annotations.
_pause = ["sil", "#0", "#1", "#2", "#3"]
# Pinyin initials; "^" marks a null (vowel-only) initial.
_initials = [
    "^",
    "b",
    "c",
    "ch",
    "d",
    "f",
    "g",
    "h",
    "j",
    "k",
    "l",
    "m",
    "n",
    "p",
    "q",
    "r",
    "s",
    "sh",
    "t",
    "x",
    "z",
    "zh",
]
# Tone digits appended to finals; "5" is the neutral tone.
_tones = ["1", "2", "3", "4", "5"]
# Pinyin finals; "v" stands for the umlaut-u (ü), "ii"/"iii" are the
# apical vowels after z/c/s and zh/ch/sh/r respectively.
_finals = [
    "a",
    "ai",
    "an",
    "ang",
    "ao",
    "e",
    "ei",
    "en",
    "eng",
    "er",
    "i",
    "ia",
    "ian",
    "iang",
    "iao",
    "ie",
    "ii",
    "iii",
    "in",
    "ing",
    "iong",
    "iou",
    "o",
    "ong",
    "ou",
    "u",
    "ua",
    "uai",
    "uan",
    "uang",
    "uei",
    "uen",
    "ueng",
    "uo",
    "v",
    "van",
    "ve",
    "vn",
]
# Full symbol table: pad + pauses + initials + every (final, tone) pair + eos.
BAKER_SYMBOLS = _pad + _pause + _initials + [i + j for i in _finals for j in _tones] + _eos
# Maps a toneless pinyin syllable to its (initial, final) decomposition.
# "^" denotes a null initial (vowel-initial syllables such as "a", "wo",
# "yi"); "v" stands for ü. Apical vowels are written "ii" (after z/c/s)
# and "iii" (after zh/ch/sh/r).
PINYIN_DICT = {
    "a": ("^", "a"),
    "ai": ("^", "ai"),
    "an": ("^", "an"),
    "ang": ("^", "ang"),
    "ao": ("^", "ao"),
    "ba": ("b", "a"),
    "bai": ("b", "ai"),
    "ban": ("b", "an"),
    "bang": ("b", "ang"),
    "bao": ("b", "ao"),
    "be": ("b", "e"),
    "bei": ("b", "ei"),
    "ben": ("b", "en"),
    "beng": ("b", "eng"),
    "bi": ("b", "i"),
    "bian": ("b", "ian"),
    "biao": ("b", "iao"),
    "bie": ("b", "ie"),
    "bin": ("b", "in"),
    "bing": ("b", "ing"),
    "bo": ("b", "o"),
    "bu": ("b", "u"),
    "ca": ("c", "a"),
    "cai": ("c", "ai"),
    "can": ("c", "an"),
    "cang": ("c", "ang"),
    "cao": ("c", "ao"),
    "ce": ("c", "e"),
    "cen": ("c", "en"),
    "ceng": ("c", "eng"),
    "cha": ("ch", "a"),
    "chai": ("ch", "ai"),
    "chan": ("ch", "an"),
    "chang": ("ch", "ang"),
    "chao": ("ch", "ao"),
    "che": ("ch", "e"),
    "chen": ("ch", "en"),
    "cheng": ("ch", "eng"),
    "chi": ("ch", "iii"),
    "chong": ("ch", "ong"),
    "chou": ("ch", "ou"),
    "chu": ("ch", "u"),
    "chua": ("ch", "ua"),
    "chuai": ("ch", "uai"),
    "chuan": ("ch", "uan"),
    "chuang": ("ch", "uang"),
    "chui": ("ch", "uei"),
    "chun": ("ch", "uen"),
    "chuo": ("ch", "uo"),
    "ci": ("c", "ii"),
    "cong": ("c", "ong"),
    "cou": ("c", "ou"),
    "cu": ("c", "u"),
    "cuan": ("c", "uan"),
    "cui": ("c", "uei"),
    "cun": ("c", "uen"),
    "cuo": ("c", "uo"),
    "da": ("d", "a"),
    "dai": ("d", "ai"),
    "dan": ("d", "an"),
    "dang": ("d", "ang"),
    "dao": ("d", "ao"),
    "de": ("d", "e"),
    "dei": ("d", "ei"),
    "den": ("d", "en"),
    "deng": ("d", "eng"),
    "di": ("d", "i"),
    "dia": ("d", "ia"),
    "dian": ("d", "ian"),
    "diao": ("d", "iao"),
    "die": ("d", "ie"),
    "ding": ("d", "ing"),
    "diu": ("d", "iou"),
    "dong": ("d", "ong"),
    "dou": ("d", "ou"),
    "du": ("d", "u"),
    "duan": ("d", "uan"),
    "dui": ("d", "uei"),
    "dun": ("d", "uen"),
    "duo": ("d", "uo"),
    "e": ("^", "e"),
    "ei": ("^", "ei"),
    "en": ("^", "en"),
    "ng": ("^", "en"),
    "eng": ("^", "eng"),
    "er": ("^", "er"),
    "fa": ("f", "a"),
    "fan": ("f", "an"),
    "fang": ("f", "ang"),
    "fei": ("f", "ei"),
    "fen": ("f", "en"),
    "feng": ("f", "eng"),
    "fo": ("f", "o"),
    "fou": ("f", "ou"),
    "fu": ("f", "u"),
    "ga": ("g", "a"),
    "gai": ("g", "ai"),
    "gan": ("g", "an"),
    "gang": ("g", "ang"),
    "gao": ("g", "ao"),
    "ge": ("g", "e"),
    "gei": ("g", "ei"),
    "gen": ("g", "en"),
    "geng": ("g", "eng"),
    "gong": ("g", "ong"),
    "gou": ("g", "ou"),
    "gu": ("g", "u"),
    "gua": ("g", "ua"),
    "guai": ("g", "uai"),
    "guan": ("g", "uan"),
    "guang": ("g", "uang"),
    "gui": ("g", "uei"),
    "gun": ("g", "uen"),
    "guo": ("g", "uo"),
    "ha": ("h", "a"),
    "hai": ("h", "ai"),
    "han": ("h", "an"),
    "hang": ("h", "ang"),
    "hao": ("h", "ao"),
    "he": ("h", "e"),
    "hei": ("h", "ei"),
    "hen": ("h", "en"),
    "heng": ("h", "eng"),
    "hong": ("h", "ong"),
    "hou": ("h", "ou"),
    "hu": ("h", "u"),
    "hua": ("h", "ua"),
    "huai": ("h", "uai"),
    "huan": ("h", "uan"),
    "huang": ("h", "uang"),
    "hui": ("h", "uei"),
    "hun": ("h", "uen"),
    "huo": ("h", "uo"),
    "ji": ("j", "i"),
    "jia": ("j", "ia"),
    "jian": ("j", "ian"),
    "jiang": ("j", "iang"),
    "jiao": ("j", "iao"),
    "jie": ("j", "ie"),
    "jin": ("j", "in"),
    "jing": ("j", "ing"),
    "jiong": ("j", "iong"),
    "jiu": ("j", "iou"),
    "ju": ("j", "v"),
    "juan": ("j", "van"),
    "jue": ("j", "ve"),
    "jun": ("j", "vn"),
    "ka": ("k", "a"),
    "kai": ("k", "ai"),
    "kan": ("k", "an"),
    "kang": ("k", "ang"),
    "kao": ("k", "ao"),
    "ke": ("k", "e"),
    "kei": ("k", "ei"),
    "ken": ("k", "en"),
    "keng": ("k", "eng"),
    "kong": ("k", "ong"),
    "kou": ("k", "ou"),
    "ku": ("k", "u"),
    "kua": ("k", "ua"),
    "kuai": ("k", "uai"),
    "kuan": ("k", "uan"),
    "kuang": ("k", "uang"),
    "kui": ("k", "uei"),
    "kun": ("k", "uen"),
    "kuo": ("k", "uo"),
    "la": ("l", "a"),
    "lai": ("l", "ai"),
    "lan": ("l", "an"),
    "lang": ("l", "ang"),
    "lao": ("l", "ao"),
    "le": ("l", "e"),
    "lei": ("l", "ei"),
    "leng": ("l", "eng"),
    "li": ("l", "i"),
    "lia": ("l", "ia"),
    "lian": ("l", "ian"),
    "liang": ("l", "iang"),
    "liao": ("l", "iao"),
    "lie": ("l", "ie"),
    "lin": ("l", "in"),
    "ling": ("l", "ing"),
    "liu": ("l", "iou"),
    "lo": ("l", "o"),
    "long": ("l", "ong"),
    "lou": ("l", "ou"),
    "lu": ("l", "u"),
    "lv": ("l", "v"),
    "luan": ("l", "uan"),
    "lve": ("l", "ve"),
    "lue": ("l", "ve"),
    "lun": ("l", "uen"),
    "luo": ("l", "uo"),
    "ma": ("m", "a"),
    "mai": ("m", "ai"),
    "man": ("m", "an"),
    "mang": ("m", "ang"),
    "mao": ("m", "ao"),
    "me": ("m", "e"),
    "mei": ("m", "ei"),
    "men": ("m", "en"),
    "meng": ("m", "eng"),
    "mi": ("m", "i"),
    "mian": ("m", "ian"),
    "miao": ("m", "iao"),
    "mie": ("m", "ie"),
    "min": ("m", "in"),
    "ming": ("m", "ing"),
    "miu": ("m", "iou"),
    "mo": ("m", "o"),
    "mou": ("m", "ou"),
    "mu": ("m", "u"),
    "na": ("n", "a"),
    "nai": ("n", "ai"),
    "nan": ("n", "an"),
    "nang": ("n", "ang"),
    "nao": ("n", "ao"),
    "ne": ("n", "e"),
    "nei": ("n", "ei"),
    "nen": ("n", "en"),
    "neng": ("n", "eng"),
    "ni": ("n", "i"),
    "nia": ("n", "ia"),
    "nian": ("n", "ian"),
    "niang": ("n", "iang"),
    "niao": ("n", "iao"),
    "nie": ("n", "ie"),
    "nin": ("n", "in"),
    "ning": ("n", "ing"),
    "niu": ("n", "iou"),
    "nong": ("n", "ong"),
    "nou": ("n", "ou"),
    "nu": ("n", "u"),
    "nv": ("n", "v"),
    "nuan": ("n", "uan"),
    "nve": ("n", "ve"),
    "nue": ("n", "ve"),
    "nuo": ("n", "uo"),
    "o": ("^", "o"),
    "ou": ("^", "ou"),
    "pa": ("p", "a"),
    "pai": ("p", "ai"),
    "pan": ("p", "an"),
    "pang": ("p", "ang"),
    "pao": ("p", "ao"),
    "pe": ("p", "e"),
    "pei": ("p", "ei"),
    "pen": ("p", "en"),
    "peng": ("p", "eng"),
    "pi": ("p", "i"),
    "pian": ("p", "ian"),
    "piao": ("p", "iao"),
    "pie": ("p", "ie"),
    "pin": ("p", "in"),
    "ping": ("p", "ing"),
    "po": ("p", "o"),
    "pou": ("p", "ou"),
    "pu": ("p", "u"),
    "qi": ("q", "i"),
    "qia": ("q", "ia"),
    "qian": ("q", "ian"),
    "qiang": ("q", "iang"),
    "qiao": ("q", "iao"),
    "qie": ("q", "ie"),
    "qin": ("q", "in"),
    "qing": ("q", "ing"),
    "qiong": ("q", "iong"),
    "qiu": ("q", "iou"),
    "qu": ("q", "v"),
    "quan": ("q", "van"),
    "que": ("q", "ve"),
    "qun": ("q", "vn"),
    "ran": ("r", "an"),
    "rang": ("r", "ang"),
    "rao": ("r", "ao"),
    "re": ("r", "e"),
    "ren": ("r", "en"),
    "reng": ("r", "eng"),
    "ri": ("r", "iii"),
    "rong": ("r", "ong"),
    "rou": ("r", "ou"),
    "ru": ("r", "u"),
    "rua": ("r", "ua"),
    "ruan": ("r", "uan"),
    "rui": ("r", "uei"),
    "run": ("r", "uen"),
    "ruo": ("r", "uo"),
    "sa": ("s", "a"),
    "sai": ("s", "ai"),
    "san": ("s", "an"),
    "sang": ("s", "ang"),
    "sao": ("s", "ao"),
    "se": ("s", "e"),
    "sen": ("s", "en"),
    "seng": ("s", "eng"),
    "sha": ("sh", "a"),
    "shai": ("sh", "ai"),
    "shan": ("sh", "an"),
    "shang": ("sh", "ang"),
    "shao": ("sh", "ao"),
    "she": ("sh", "e"),
    "shei": ("sh", "ei"),
    "shen": ("sh", "en"),
    "sheng": ("sh", "eng"),
    "shi": ("sh", "iii"),
    "shou": ("sh", "ou"),
    "shu": ("sh", "u"),
    "shua": ("sh", "ua"),
    "shuai": ("sh", "uai"),
    "shuan": ("sh", "uan"),
    "shuang": ("sh", "uang"),
    "shui": ("sh", "uei"),
    "shun": ("sh", "uen"),
    "shuo": ("sh", "uo"),
    "si": ("s", "ii"),
    "song": ("s", "ong"),
    "sou": ("s", "ou"),
    "su": ("s", "u"),
    "suan": ("s", "uan"),
    "sui": ("s", "uei"),
    "sun": ("s", "uen"),
    "suo": ("s", "uo"),
    "ta": ("t", "a"),
    "tai": ("t", "ai"),
    "tan": ("t", "an"),
    "tang": ("t", "ang"),
    "tao": ("t", "ao"),
    "te": ("t", "e"),
    "tei": ("t", "ei"),
    "teng": ("t", "eng"),
    "ti": ("t", "i"),
    "tian": ("t", "ian"),
    "tiao": ("t", "iao"),
    "tie": ("t", "ie"),
    "ting": ("t", "ing"),
    "tong": ("t", "ong"),
    "tou": ("t", "ou"),
    "tu": ("t", "u"),
    "tuan": ("t", "uan"),
    "tui": ("t", "uei"),
    "tun": ("t", "uen"),
    "tuo": ("t", "uo"),
    "wa": ("^", "ua"),
    "wai": ("^", "uai"),
    "wan": ("^", "uan"),
    "wang": ("^", "uang"),
    "wei": ("^", "uei"),
    "wen": ("^", "uen"),
    "weng": ("^", "ueng"),
    "wo": ("^", "uo"),
    "wu": ("^", "u"),
    "xi": ("x", "i"),
    "xia": ("x", "ia"),
    "xian": ("x", "ian"),
    "xiang": ("x", "iang"),
    "xiao": ("x", "iao"),
    "xie": ("x", "ie"),
    "xin": ("x", "in"),
    "xing": ("x", "ing"),
    "xiong": ("x", "iong"),
    "xiu": ("x", "iou"),
    "xu": ("x", "v"),
    "xuan": ("x", "van"),
    "xue": ("x", "ve"),
    "xun": ("x", "vn"),
    "ya": ("^", "ia"),
    "yan": ("^", "ian"),
    "yang": ("^", "iang"),
    "yao": ("^", "iao"),
    "ye": ("^", "ie"),
    "yi": ("^", "i"),
    "yin": ("^", "in"),
    "ying": ("^", "ing"),
    "yo": ("^", "iou"),
    "yong": ("^", "iong"),
    "you": ("^", "iou"),
    "yu": ("^", "v"),
    "yuan": ("^", "van"),
    "yue": ("^", "ve"),
    "yun": ("^", "vn"),
    "za": ("z", "a"),
    "zai": ("z", "ai"),
    "zan": ("z", "an"),
    "zang": ("z", "ang"),
    "zao": ("z", "ao"),
    "ze": ("z", "e"),
    "zei": ("z", "ei"),
    "zen": ("z", "en"),
    "zeng": ("z", "eng"),
    "zha": ("zh", "a"),
    "zhai": ("zh", "ai"),
    "zhan": ("zh", "an"),
    "zhang": ("zh", "ang"),
    "zhao": ("zh", "ao"),
    "zhe": ("zh", "e"),
    "zhei": ("zh", "ei"),
    "zhen": ("zh", "en"),
    "zheng": ("zh", "eng"),
    "zhi": ("zh", "iii"),
    "zhong": ("zh", "ong"),
    "zhou": ("zh", "ou"),
    "zhu": ("zh", "u"),
    "zhua": ("zh", "ua"),
    "zhuai": ("zh", "uai"),
    "zhuan": ("zh", "uan"),
    "zhuang": ("zh", "uang"),
    "zhui": ("zh", "uei"),
    "zhun": ("zh", "uen"),
    "zhuo": ("zh", "uo"),
    "zi": ("z", "ii"),
    "zong": ("z", "ong"),
    "zou": ("z", "ou"),
    "zu": ("z", "u"),
    "zuan": ("z", "uan"),
    "zui": ("z", "uei"),
    "zun": ("z", "uen"),
    "zuo": ("z", "uo"),
}
# Compiled once at import time: matches any character in the CJK Unified
# Ideographs range (U+4E00..U+9FA5).
zh_pattern = re.compile("[\u4e00-\u9fa5]")


def is_zh(word):
    """Return True if ``word`` contains at least one Chinese character.

    Args:
        word (str): text to inspect (a single character or longer string).

    Returns:
        bool: True when any character falls in the CJK range above.
    """
    # The original declared ``global zh_pattern``, which is unnecessary for a
    # read-only lookup of a module-level name and has been removed.
    return zh_pattern.search(word) is not None
class MyConverter(NeutralToneWith5Mixin, DefaultConverter):
    """pypinyin converter; per the mixin's name it renders the neutral tone
    as tone "5" (behavior comes entirely from pypinyin — verify against the
    installed pypinyin version)."""
    pass
@dataclass
class BakerProcessor(BaseProcessor):
    """Processor for the Baker Chinese dataset.

    Converts annotated pinyin transcriptions into initial/final phoneme
    sequences (with tone digits and prosodic-break markers) and loads the
    matching waveforms, resampling them to ``target_rate``.
    """

    # Toneless pinyin syllable -> (initial, final) lookup used for phonemization.
    pinyin_dict: Dict[str, Tuple[str, str]] = field(default_factory=lambda: PINYIN_DICT)
    cleaner_names: str = None
    target_rate: int = 24000
    speaker_name: str = "baker"

    def __post_init__(self):
        super().__post_init__()
        self.pinyin_parser = self.get_pinyin_parser()

    def setup_eos_token(self):
        # End-of-sequence symbol appended by text_to_sequence().
        return _eos[0]

    def save_pretrained(self, saved_path):
        """Save the processor mapper (including the pinyin dict) to ``saved_path``."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(
            os.path.join(saved_path, PROCESSOR_FILE_NAME),
            {"pinyin_dict": self.pinyin_dict},
        )

    def create_items(self):
        """Populate ``self.items`` with [phoneme_text, wav_path, utt_id, speaker] rows.

        The transcript file alternates lines: a text line ("utt_id chars")
        followed by its pinyin line.
        """
        items = []
        if self.data_dir:
            with open(
                os.path.join(self.data_dir, "ProsodyLabeling/000001-010000.txt"),
                encoding="utf-8",
            ) as ttf:
                lines = ttf.readlines()
                for idx in range(0, len(lines), 2):
                    utt_id, chn_char = lines[idx].strip().split()
                    pinyin = lines[idx + 1].strip().split()
                    # Skip utterances containing English phones/letters that the
                    # Chinese symbol table cannot represent.
                    if "IY1" in pinyin or "B" in chn_char:
                        print(f"Skip this: {utt_id} {chn_char} {pinyin}")
                        continue
                    phonemes = self.get_phoneme_from_char_and_pinyin(chn_char, pinyin)
                    wav_path = os.path.join(self.data_dir, "Wave", "%s.wav" % utt_id)
                    items.append(
                        [" ".join(phonemes), wav_path, utt_id, self.speaker_name]
                    )
            self.items = items

    def get_phoneme_from_char_and_pinyin(self, chn_char, pinyin):
        """Align characters with their pinyin and emit the phoneme sequence.

        ``i`` walks the character string, ``j`` the pinyin list; they advance
        at different speeds because break markers (#N) occupy two characters
        but no pinyin, and erhua (儿) merges two characters into one syllable.
        """
        # we do not need #4, use sil to replace it
        chn_char = chn_char.replace("#4", "")
        char_len = len(chn_char)
        i, j = 0, 0
        result = ["sil"]
        while i < char_len:
            cur_char = chn_char[i]
            if is_zh(cur_char):
                if pinyin[j][:-1] not in self.pinyin_dict:
                    # Erhua case: the syllable ends in "r" + tone and the next
                    # character must be 儿; emit base syllable plus "er5".
                    assert chn_char[i + 1] == "儿"
                    assert pinyin[j][-2] == "r"
                    tone = pinyin[j][-1]
                    a = pinyin[j][:-2]
                    a1, a2 = self.pinyin_dict[a]
                    result += [a1, a2 + tone, "er5"]
                    # Insert a level-0 break unless a marker (#N) follows.
                    if i + 2 < char_len and chn_char[i + 2] != "#":
                        result.append("#0")
                    i += 2
                    j += 1
                else:
                    tone = pinyin[j][-1]
                    a = pinyin[j][:-1]
                    a1, a2 = self.pinyin_dict[a]
                    result += [a1, a2 + tone]
                    if i + 1 < char_len and chn_char[i + 1] != "#":
                        result.append("#0")
                    i += 1
                    j += 1
            elif cur_char == "#":
                # Break marker such as "#1"/"#2"/"#3" — copy it verbatim.
                result.append(chn_char[i : i + 2])
                i += 2
            else:
                # ignore the unknown char and punctuation
                # result.append(chn_char[i])
                i += 1
        # Replace a trailing level-0 break with end-of-utterance silence.
        if result[-1] == "#0":
            result = result[:-1]
        result.append("sil")
        assert j == len(pinyin)
        return result

    def get_one_sample(self, item):
        """Load one utterance: audio (resampled to ``target_rate``) + text ids."""
        text, wav_file, utt_id, speaker_name = item

        # normalize audio signal to be [-1, 1], soundfile already norm.
        audio, rate = sf.read(wav_file)
        audio = audio.astype(np.float32)
        if rate != self.target_rate:
            assert rate > self.target_rate
            audio = librosa.resample(audio, rate, self.target_rate)

        # convert text to ids
        try:
            text_ids = np.asarray(self.text_to_sequence(text), np.int32)
        except Exception as e:
            # Propagate None so the caller can drop this utterance.
            print(e, utt_id, text)
            return None

        # return None
        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": str(int(utt_id)),
            "speaker_name": speaker_name,
            "rate": self.target_rate,
        }

        return sample

    def get_pinyin_parser(self):
        """Return a pypinyin parser that renders the neutral tone as "5"."""
        my_pinyin = Pinyin(MyConverter())
        pinyin = my_pinyin.pinyin
        return pinyin

    def text_to_sequence(self, text, inference=False):
        """Convert phoneme text (or raw Chinese text when ``inference``) to ids."""
        if inference:
            # At inference time we must first derive pinyin from raw text.
            pinyin = self.pinyin_parser(text, style=Style.TONE3, errors="ignore")
            new_pinyin = []
            for x in pinyin:
                x = "".join(x)
                if "#" not in x:
                    new_pinyin.append(x)
            phonemes = self.get_phoneme_from_char_and_pinyin(text, new_pinyin)
            text = " ".join(phonemes)
            print(f"phoneme seq: {text}")

        sequence = []
        for symbol in text.split():
            idx = self.symbol_to_id[symbol]
            sequence.append(idx)

        # add eos tokens
        sequence += [self.eos_id]
        return sequence
| 17,400 | 24.477306 | 91 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/processor/thorsten.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing and raw feature extraction for LJSpeech dataset."""
import os
import re
import numpy as np
import soundfile as sf
from dataclasses import dataclass
from tensorflow_tts.processor import BaseProcessor
from tensorflow_tts.utils import cleaners
from tensorflow_tts.utils.utils import PROCESSOR_FILE_NAME
# Symbol inventory for the Thorsten (German) dataset.
_pad = "pad"
_eos = "eos"
_punctuation = "!'(),.? "
_special = "-"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"

# Export all symbols:
THORSTEN_SYMBOLS = (
    [_pad] + list(_special) + list(_punctuation) + list(_letters) + [_eos]
)

# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
@dataclass
class ThorstenProcessor(BaseProcessor):
    """Thorsten processor.

    Reads ``metadata.csv`` ("file|normalized text" rows), cleans the text
    with the German cleaner, and yields (text, wav-path, speaker) items.
    """

    cleaner_names: str = "german_cleaners"
    # NOTE: deliberately unannotated so the dataclass treats this as a plain
    # shared class attribute rather than a (mutable-default) field.
    positions = {
        "wave_file": 0,
        "text_norm": 1,
    }
    train_f_name: str = "metadata.csv"

    def create_items(self):
        """Populate ``self.items`` from the pipe-separated metadata file."""
        if self.data_dir:
            with open(
                os.path.join(self.data_dir, self.train_f_name), encoding="utf-8"
            ) as f:
                self.items = [self.split_line(self.data_dir, line, "|") for line in f]

    def split_line(self, data_dir, line, split):
        """Parse one metadata line into (normalized text, wav path, speaker)."""
        parts = line.strip().split(split)
        wave_file = parts[self.positions["wave_file"]]
        text_norm = parts[self.positions["text_norm"]]
        wav_path = os.path.join(data_dir, "wavs", f"{wave_file}.wav")
        speaker_name = "thorsten"
        return text_norm, wav_path, speaker_name

    def setup_eos_token(self):
        # End-of-sequence symbol appended by text_to_sequence().
        return _eos

    def save_pretrained(self, saved_path):
        """Save the processor mapper JSON to ``saved_path``."""
        os.makedirs(saved_path, exist_ok=True)
        self._save_mapper(os.path.join(saved_path, PROCESSOR_FILE_NAME), {})

    def get_one_sample(self, item):
        """Load one utterance: raw audio, text ids, and bookkeeping fields."""
        text, wav_path, speaker_name = item

        # normalize audio signal to be [-1, 1], soundfile already norm.
        audio, rate = sf.read(wav_path)
        audio = audio.astype(np.float32)

        # convert text to ids
        text_ids = np.asarray(self.text_to_sequence(text), np.int32)

        sample = {
            "raw_text": text,
            "text_ids": text_ids,
            "audio": audio,
            "utt_id": os.path.split(wav_path)[-1].split(".")[0],
            "speaker_name": speaker_name,
            "rate": rate,
        }

        return sample

    def text_to_sequence(self, text):
        """Convert text to a symbol-id sequence, honoring {ARPAbet} spans."""
        sequence = []
        # Check for curly braces and treat their contents as ARPAbet:
        while len(text):
            m = _curly_re.match(text)
            if not m:
                sequence += self._symbols_to_sequence(
                    self._clean_text(text, [self.cleaner_names])
                )
                break
            sequence += self._symbols_to_sequence(
                self._clean_text(m.group(1), [self.cleaner_names])
            )
            sequence += self._arpabet_to_sequence(m.group(2))
            text = m.group(3)

        # add eos tokens
        sequence += [self.eos_id]
        return sequence

    def _clean_text(self, text, cleaner_names):
        # Apply each named cleaner from tensorflow_tts.utils.cleaners in order.
        for name in cleaner_names:
            cleaner = getattr(cleaners, name)
            if not cleaner:
                raise Exception("Unknown cleaner: %s" % name)
            text = cleaner(text)
        return text

    def _symbols_to_sequence(self, symbols):
        return [self.symbol_to_id[s] for s in symbols if self._should_keep_symbol(s)]

    def _arpabet_to_sequence(self, text):
        # Prefix with "@" to address the ARPAbet part of the symbol table.
        return self._symbols_to_sequence(["@" + s for s in text.split()])

    def _should_keep_symbol(self, s):
        # Drop unknown symbols and the reserved pad/eos markers.
        return s in self.symbol_to_id and s != "_" and s != "~"
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/bin/__init__.py | 0 | 0 | 0 | py | |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/bin/preprocess.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Perform preprocessing, with raw feature extraction and normalization of train/valid split."""
import argparse
import glob
import logging
import os
import yaml
import librosa
import numpy as np
import pyworld as pw
from functools import partial
from multiprocessing import Pool
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from tensorflow_tts.processor import LJSpeechProcessor
from tensorflow_tts.processor import BakerProcessor
from tensorflow_tts.processor import KSSProcessor
from tensorflow_tts.processor import LibriTTSProcessor
from tensorflow_tts.processor import ThorstenProcessor
from tensorflow_tts.processor import LJSpeechUltimateProcessor
from tensorflow_tts.processor import SynpaflexProcessor
from tensorflow_tts.processor import JSUTProcessor
from tensorflow_tts.processor.ljspeech import LJSPEECH_SYMBOLS
from tensorflow_tts.processor.baker import BAKER_SYMBOLS
from tensorflow_tts.processor.kss import KSS_SYMBOLS
from tensorflow_tts.processor.libritts import LIBRITTS_SYMBOLS
from tensorflow_tts.processor.thorsten import THORSTEN_SYMBOLS
from tensorflow_tts.processor.ljspeechu import LJSPEECH_U_SYMBOLS
from tensorflow_tts.processor.synpaflex import SYNPAFLEX_SYMBOLS
from tensorflow_tts.processor.jsut import JSUT_SYMBOLS
from tensorflow_tts.utils import remove_outlier
os.environ["CUDA_VISIBLE_DEVICES"] = ""
def parse_and_config():
    """Parse command-line arguments and build the configuration dict.

    Returns:
        Dict: the YAML config merged with the parsed CLI arguments
        (CLI values win on key collisions).

    Raises:
        AssertionError: if the configured feature format is not "npy".
    """
    parser = argparse.ArgumentParser(
        description="Preprocess audio and text features "
        "(See detail in tensorflow_tts/bin/preprocess_dataset.py)."
    )
    parser.add_argument(
        "--rootdir",
        default=None,
        type=str,
        required=True,
        help="Directory containing the dataset files.",
    )
    parser.add_argument(
        "--outdir",
        default=None,
        type=str,
        required=True,
        help="Output directory where features will be saved.",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        default="ljspeech",
        choices=["ljspeech", "kss", "libritts", "baker", "thorsten", "ljspeechu", "synpaflex", "jsut"],
        help="Dataset to preprocess.",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="YAML format configuration file."
    )
    parser.add_argument(
        "--n_cpus",
        type=int,
        default=4,
        required=False,
        help="Number of CPUs to use in parallel.",
    )
    parser.add_argument(
        "--test_size",
        type=float,
        default=0.05,
        required=False,
        help="Proportion of files to use as test dataset.",
    )
    parser.add_argument(
        "--verbose",
        type=int,
        default=0,
        choices=[0, 1, 2],
        help="Logging level. 0: DEBUG, 1: INFO and WARNING, 2: INFO, WARNING, and ERROR",
    )
    args = parser.parse_args()

    # set logger
    # NOTE(review): this mapping (0 -> DEBUG, 1 -> WARNING, 2 -> ERROR) does
    # not match the --verbose help text above — confirm which is intended.
    FORMAT = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
    log_level = {0: logging.DEBUG, 1: logging.WARNING, 2: logging.ERROR}
    logging.basicConfig(level=log_level[args.verbose], format=FORMAT)

    # load config with a context manager so the file handle is closed
    # (the original left the handle returned by open() unclosed).
    with open(args.config) as config_file:
        config = yaml.load(config_file, Loader=yaml.SafeLoader)
    config.update(vars(args))

    # config checks
    assert config["format"] == "npy", "'npy' is the only supported format."

    return config
def ph_based_trim(
    config,
    utt_id: str,
    text_ids: np.array,
    raw_text: str,
    audio: np.array,
    hop_size: int,
) -> (bool, np.array, np.array):
    """
    Trim leading/trailing silence phonemes using MFA duration files.

    Args:
        config: Parsed yaml config
        utt_id: file name
        text_ids: array with text ids
        raw_text: raw text of file
        audio: parsed wav file
        hop_size: Hop size
    Returns: (bool, np.array, np.array) => if trimmed return True, new text_ids, new audio_array
    """

    os.makedirs(os.path.join(config["rootdir"], "trimmed-durations"), exist_ok=True)
    duration_path = config.get(
        "duration_path", os.path.join(config["rootdir"], "durations")
    )
    duration_fixed_path = config.get(
        "duration_fixed_path", os.path.join(config["rootdir"], "trimmed-durations")
    )
    sil_ph = ["SIL", "END"]  # TODO FIX hardcoded values
    text = raw_text.split(" ")
    trim_start, trim_end = False, False

    # Only trim if the first/last phoneme is a silence marker.
    if text[0] in sil_ph:
        trim_start = True
    if text[-1] in sil_ph:
        trim_end = True

    if not trim_start and not trim_end:
        return False, text_ids, audio

    # Slice off the silence tokens at either end of the id sequence.
    idx_start, idx_end = (
        0 if not trim_start else 1,
        text_ids.__len__() if not trim_end else -1,
    )
    text_ids = text_ids[idx_start:idx_end]
    # Durations are in frames; multiply by hop_size to get sample counts.
    durations = np.load(os.path.join(duration_path, f"{utt_id}-durations.npy"))
    if trim_start:
        s_trim = int(durations[0] * hop_size)
        audio = audio[s_trim:]
    if trim_end:
        e_trim = int(durations[-1] * hop_size)
        audio = audio[:-e_trim]
    # Persist the trimmed duration array so later stages stay aligned.
    durations = durations[idx_start:idx_end]
    np.save(os.path.join(duration_fixed_path, f"{utt_id}-durations.npy"), durations)

    return True, text_ids, audio
def gen_audio_features(item, config):
    """Generate audio features and transformations

    Args:
        item (Dict): dictionary containing the attributes to encode.
        config (Dict): configuration dictionary.

    Returns:
        (bool): keep this sample or not.
        mel (ndarray): mel matrix in np.float32.
        energy (ndarray): energy audio profile.
        f0 (ndarray): fundamental frequency.
        item (Dict): dictionary containing the updated attributes.
    """
    # get info from sample.
    audio = item["audio"]
    utt_id = item["utt_id"]
    rate = item["rate"]

    # check audio properties
    assert len(audio.shape) == 1, f"{utt_id} seems to be multi-channel signal."
    assert np.abs(audio).max() <= 1.0, f"{utt_id} is different from 16 bit PCM."

    # check sample rate; after this step `audio` is at config["sampling_rate"].
    if rate != config["sampling_rate"]:
        audio = librosa.resample(audio, rate, config["sampling_rate"])
        logging.info(f"{utt_id} sampling rate is {rate}, not {config['sampling_rate']}, we resample it.")

    # trim silence
    if config["trim_silence"]:
        if "trim_mfa" in config and config["trim_mfa"]:
            _, item["text_ids"], audio = ph_based_trim(
                config,
                utt_id,
                item["text_ids"],
                item["raw_text"],
                audio,
                config["hop_size"],
            )
            if (
                audio.__len__() < 1
            ):  # very short files can get trimmed fully if mfa didnt extract any tokens LibriTTS maybe take only longer files?
                logging.warning(
                    f"File have only silence or MFA didnt extract any token {utt_id}"
                )
                return False, None, None, None, item
        else:
            audio, _ = librosa.effects.trim(
                audio,
                top_db=config["trim_threshold_in_db"],
                frame_length=config["trim_frame_size"],
                hop_length=config["trim_hop_size"],
            )

    # resample audio if necessary for feature extraction.
    # BUGFIX: the source rate here must be config["sampling_rate"] (the rate
    # the audio is actually at after the resample above), not the original
    # file rate -- the old code resampled from the wrong rate whenever the
    # file rate differed from config["sampling_rate"].
    if "sampling_rate_for_feats" in config:
        audio = librosa.resample(
            audio, config["sampling_rate"], config["sampling_rate_for_feats"]
        )
        sampling_rate = config["sampling_rate_for_feats"]
        assert (
            config["hop_size"] * config["sampling_rate_for_feats"] % config["sampling_rate"] == 0
        ), "'hop_size' must be 'int' value. Please check if 'sampling_rate_for_feats' is correct."
        hop_size = config["hop_size"] * config["sampling_rate_for_feats"] // config["sampling_rate"]
    else:
        sampling_rate = config["sampling_rate"]
        hop_size = config["hop_size"]

    # get spectrogram
    D = librosa.stft(
        audio,
        n_fft=config["fft_size"],
        hop_length=hop_size,
        win_length=config["win_length"],
        window=config["window"],
        pad_mode="reflect",
    )
    S, _ = librosa.magphase(D)  # (#bins, #frames)

    # get mel basis
    fmin = 0 if config["fmin"] is None else config["fmin"]
    fmax = sampling_rate // 2 if config["fmax"] is None else config["fmax"]
    mel_basis = librosa.filters.mel(
        sr=sampling_rate,
        n_fft=config["fft_size"],
        n_mels=config["num_mels"],
        fmin=fmin,
        fmax=fmax,
    )
    # log10 mel with a floor of 1e-10 to avoid log(0).
    mel = np.log10(np.maximum(np.dot(mel_basis, S), 1e-10)).T  # (#frames, #bins)

    # check audio and feature length; pad so audio covers exactly len(mel) frames.
    audio = np.pad(audio, (0, config["fft_size"]), mode="edge")
    audio = audio[: len(mel) * hop_size]
    assert len(mel) * hop_size == len(audio)

    # extract raw pitch with WORLD (dio + stonemask refinement).
    _f0, t = pw.dio(
        audio.astype(np.double),
        fs=sampling_rate,
        f0_ceil=fmax,
        frame_period=1000 * hop_size / sampling_rate,
    )
    f0 = pw.stonemask(audio.astype(np.double), _f0, t, sampling_rate)
    # align f0 length to the mel frame count.
    if len(f0) >= len(mel):
        f0 = f0[: len(mel)]
    else:
        f0 = np.pad(f0, (0, len(mel) - len(f0)))

    # extract energy as the per-frame L2 norm of the magnitude spectrum.
    energy = np.sqrt(np.sum(S ** 2, axis=0))
    assert len(mel) == len(f0) == len(energy)

    # remove outlier f0/energy
    f0 = remove_outlier(f0)
    energy = remove_outlier(energy)

    # apply global gain
    if config["global_gain_scale"] > 0.0:
        audio *= config["global_gain_scale"]
    if np.abs(audio).max() >= 1.0:
        logging.warn(
            f"{utt_id} causes clipping. It is better to reconsider global gain scale value."
        )

    item["audio"] = audio
    item["mel"] = mel
    item["f0"] = f0
    item["energy"] = energy
    return True, mel, energy, f0, item
def save_statistics_to_file(scaler_list, config):
    """Persist the mean/scale statistics of each fitted scaler to disk.

    Args:
        scaler_list (List): (scaler, name-suffix) pairs; each scaler exposes
            ``mean_`` and ``scale_`` attributes.
        config (Dict): configuration dictionary providing ``outdir``.
    """
    out_dir = config["outdir"]
    for fitted_scaler, suffix in scaler_list:
        # Row 0 holds the mean, row 1 the scale — the layout normalize() expects.
        packed = np.stack((fitted_scaler.mean_, fitted_scaler.scale_))
        target = os.path.join(out_dir, f"stats{suffix}.npy")
        np.save(target, packed.astype(np.float32), allow_pickle=False)
def save_features_to_file(features, subdir, config):
    """Save transformed dataset features in disk.

    Args:
        features (Dict): dictionary containing the attributes to save.
        subdir (str): data split folder where features will be saved.
        config (Dict): configuration dictionary.

    Raises:
        ValueError: if ``config["format"]`` is anything other than "npy".
    """
    if config["format"] != "npy":
        raise ValueError("'npy' is the only supported format.")

    utt_id = features["utt_id"]
    # (feature key, destination folder, file-name suffix, storage dtype)
    layout = (
        ("audio", "wavs", "wave", np.float32),
        ("mel", "raw-feats", "raw-feats", np.float32),
        ("text_ids", "ids", "ids", np.int32),
        ("f0", "raw-f0", "raw-f0", np.float32),
        ("energy", "raw-energies", "raw-energy", np.float32),
    )
    for key, folder, suffix, dtype in layout:
        dest = os.path.join(
            config["outdir"], subdir, folder, f"{utt_id}-{suffix}.npy"
        )
        np.save(dest, features[key].astype(dtype), allow_pickle=False)
def preprocess():
    """Run preprocessing process and compute statistics for normalizing.

    Loads the selected dataset processor, splits items into train/valid,
    extracts audio features in parallel, fits mel/energy/f0 scalers on the
    train split, and writes features plus statistics to ``outdir``.
    """
    config = parse_and_config()

    dataset_processor = {
        "ljspeech": LJSpeechProcessor,
        "kss": KSSProcessor,
        "libritts": LibriTTSProcessor,
        "baker": BakerProcessor,
        "thorsten": ThorstenProcessor,
        "ljspeechu": LJSpeechUltimateProcessor,
        "synpaflex": SynpaflexProcessor,
        "jsut": JSUTProcessor,
    }

    dataset_symbol = {
        "ljspeech": LJSPEECH_SYMBOLS,
        "kss": KSS_SYMBOLS,
        "libritts": LIBRITTS_SYMBOLS,
        "baker": BAKER_SYMBOLS,
        "thorsten": THORSTEN_SYMBOLS,
        "ljspeechu": LJSPEECH_U_SYMBOLS,
        "synpaflex": SYNPAFLEX_SYMBOLS,
        "jsut": JSUT_SYMBOLS,
    }

    dataset_cleaner = {
        "ljspeech": "english_cleaners",
        "kss": "korean_cleaners",
        "libritts": None,
        "baker": None,
        "thorsten": "german_cleaners",
        "ljspeechu": "english_cleaners",
        "synpaflex": "basic_cleaners",
        "jsut": None,
    }

    logging.info(f"Selected '{config['dataset']}' processor.")
    processor = dataset_processor[config["dataset"]](
        config["rootdir"],
        symbols=dataset_symbol[config["dataset"]],
        cleaner_names=dataset_cleaner[config["dataset"]],
    )

    # check output directories
    build_dir = lambda x: [
        os.makedirs(os.path.join(config["outdir"], x, y), exist_ok=True)
        for y in ["raw-feats", "wavs", "ids", "raw-f0", "raw-energies"]
    ]
    build_dir("train")
    build_dir("valid")

    # save pretrained-processor to feature dir
    processor._save_mapper(
        os.path.join(config["outdir"], f"{config['dataset']}_mapper.json"),
        extra_attrs_to_save={"pinyin_dict": processor.pinyin_dict}
        if config["dataset"] == "baker"
        else {},
    )

    # build train test split; libritts stratifies on the speaker column.
    if config["dataset"] == "libritts":
        train_split, valid_split, _, _ = train_test_split(
            processor.items,
            [i[-1] for i in processor.items],
            test_size=config["test_size"],
            random_state=42,
            shuffle=True,
        )
    else:
        train_split, valid_split = train_test_split(
            processor.items,
            test_size=config["test_size"],
            random_state=42,
            shuffle=True,
        )
    logging.info(f"Training items: {len(train_split)}")
    logging.info(f"Validation items: {len(valid_split)}")

    get_utt_id = lambda x: os.path.split(x[1])[-1].split(".")[0]
    train_utt_ids = [get_utt_id(x) for x in train_split]
    valid_utt_ids = [get_utt_id(x) for x in valid_split]

    # save train and valid utt_ids to track later
    np.save(os.path.join(config["outdir"], "train_utt_ids.npy"), train_utt_ids)
    np.save(os.path.join(config["outdir"], "valid_utt_ids.npy"), valid_utt_ids)

    # define map iterator
    def iterator_data(items_list):
        for item in items_list:
            yield processor.get_one_sample(item)

    train_iterator_data = iterator_data(train_split)
    valid_iterator_data = iterator_data(valid_split)

    p = Pool(config["n_cpus"])

    # preprocess train files and get statistics for normalizing
    partial_fn = partial(gen_audio_features, config=config)
    train_map = p.imap_unordered(
        partial_fn,
        tqdm(train_iterator_data, total=len(train_split), desc="[Preprocessing train]"),
        chunksize=10,
    )

    # init scaler for multiple features
    scaler_mel = StandardScaler(copy=False)
    scaler_energy = StandardScaler(copy=False)
    scaler_f0 = StandardScaler(copy=False)

    id_to_remove = []
    for result, mel, energy, f0, features in train_map:
        if not result:
            id_to_remove.append(features["utt_id"])
            continue
        save_features_to_file(features, "train", config)
        # partial fitting of scalers: skip utterances whose f0/energy are
        # entirely zero, since the scalers cannot be fitted on empty slices.
        # (The original repeated this guard block twice verbatim; the second
        # copy was dead code and has been removed.)
        if len(energy[energy != 0]) == 0 or len(f0[f0 != 0]) == 0:
            id_to_remove.append(features["utt_id"])
            continue
        scaler_mel.partial_fit(mel)
        scaler_energy.partial_fit(energy[energy != 0].reshape(-1, 1))
        scaler_f0.partial_fit(f0[f0 != 0].reshape(-1, 1))

    if len(id_to_remove) > 0:
        np.save(
            os.path.join(config["outdir"], "train_utt_ids.npy"),
            [i for i in train_utt_ids if i not in id_to_remove],
        )
        logging.info(
            f"removed {len(id_to_remove)} cause of too many outliers or bad mfa extraction"
        )

    # save statistics to file
    logging.info("Saving computed statistics.")
    scaler_list = [(scaler_mel, ""), (scaler_energy, "_energy"), (scaler_f0, "_f0")]
    save_statistics_to_file(scaler_list, config)

    # preprocess valid files
    partial_fn = partial(gen_audio_features, config=config)
    valid_map = p.imap_unordered(
        partial_fn,
        tqdm(valid_iterator_data, total=len(valid_split), desc="[Preprocessing valid]"),
        chunksize=10,
    )
    for *_, features in valid_map:
        save_features_to_file(features, "valid", config)
def gen_normal_mel(mel_path, scaler, config):
    """Normalize one mel spectrogram file and save the result.

    Args:
        mel_path (string): path of the mel spectrogram to normalize
            (expected layout: .../<split>/<suffix>/<utt_id>-<suffix>.npy).
        scaler (sklearn.base.BaseEstimator): scaling function to use for normalize.
        config (Dict): configuration dictionary providing ``outdir``.
    """
    raw_mel = np.load(mel_path)
    normalized = scaler.transform(raw_mel)

    # Recover split folder ("train"/"valid") and utterance id from the path.
    parent_dir, file_name = os.path.split(mel_path)
    *_, subdir, suffix = parent_dir.split(os.sep)
    utt_id = file_name.split(f"-{suffix}.npy")[0]

    dest = os.path.join(
        config["outdir"], subdir, "norm-feats", f"{utt_id}-norm-feats.npy"
    )
    np.save(dest, normalized.astype(np.float32), allow_pickle=False)
def normalize():
    """Normalize mel spectrogram with pre-computed statistics."""
    config = parse_and_config()

    # guard clause: only the npy layout is implemented
    if config["format"] != "npy":
        raise ValueError("'npy' is the only supported format.")

    # rebuild the scaler from the statistics saved by compute_statistics
    scaler = StandardScaler()
    scaler.mean_, scaler.scale_ = np.load(
        os.path.join(config["outdir"], "stats.npy")
    )
    scaler.n_features_in_ = config["num_mels"]

    # collect every "raw-feats" file from both train and valid folders
    glob_path = os.path.join(config["rootdir"], "**", "raw-feats", "*.npy")
    mel_raw_feats = glob.glob(glob_path, recursive=True)
    logging.info(f"Files to normalize: {len(mel_raw_feats)}")

    # make sure the output directories exist
    for split in ("train", "valid"):
        os.makedirs(os.path.join(config["outdir"], split, "norm-feats"), exist_ok=True)

    # normalize files in parallel
    p = Pool(config["n_cpus"])
    partial_fn = partial(gen_normal_mel, scaler=scaler, config=config)
    list(p.map(partial_fn, tqdm(mel_raw_feats, desc="[Normalizing]")))
def compute_statistics():
    """Compute mean / std statistics of some features for later normalization.

    Fits one StandardScaler each for mel, energy and f0 over the train split
    (zero frames excluded from energy/f0) and saves them via
    save_statistics_to_file.
    """
    config = parse_and_config()

    # find features files for the train split
    glob_fn = lambda x: glob.glob(os.path.join(config["rootdir"], "train", x, "*.npy"))
    glob_mel = glob_fn("raw-feats")
    glob_f0 = glob_fn("raw-f0")
    glob_energy = glob_fn("raw-energies")
    assert (
        len(glob_mel) == len(glob_f0) == len(glob_energy)
    ), "Features, f0 and energies have different files in training split."
    logging.info(f"Computing statistics for {len(glob_mel)} files.")

    # init scaler for multiple features
    scaler_mel = StandardScaler(copy=False)
    scaler_energy = StandardScaler(copy=False)
    scaler_f0 = StandardScaler(copy=False)

    for mel, f0, energy in tqdm(
        zip(glob_mel, glob_f0, glob_energy), total=len(glob_mel)
    ):
        energy = np.load(energy)
        f0 = np.load(f0)
        # exclude zero frames (silence/unvoiced) from the statistics; skip
        # files that would leave nothing, since partial_fit raises on an
        # empty array (same criterion applied during preprocessing)
        energy_nonzero = energy[energy != 0]
        f0_nonzero = f0[f0 != 0]
        if len(energy_nonzero) == 0 or len(f0_nonzero) == 0:
            continue
        # partial fitting of scalers
        scaler_mel.partial_fit(np.load(mel))
        scaler_energy.partial_fit(energy_nonzero.reshape(-1, 1))
        scaler_f0.partial_fit(f0_nonzero.reshape(-1, 1))

    # save statistics to file
    logging.info("Saving computed statistics.")
    scaler_list = [(scaler_mel, ""), (scaler_energy, "_energy"), (scaler_f0, "_f0")]
    save_statistics_to_file(scaler_list, config)
# CLI entry point: run the full preprocessing pipeline when executed as a script.
if __name__ == "__main__":
    preprocess()
| 20,390 | 33.619694 | 127 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/datasets/audio_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio modules."""
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
class AudioDataset(AbstractDataset):
    """Tensorflow compatible audio dataset.

    Yields dicts with keys ``utt_ids``, ``audios`` and ``audio_lengths``.
    """

    def __init__(
        self,
        root_dir,
        audio_query="*-wave.npy",
        audio_load_fn=np.load,
        audio_length_threshold=0,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            audio_query (str): Query to find feature files in root_dir.
            audio_load_fn (func): Function to load feature file.
            audio_length_threshold (int): Threshold to remove short feature files.
        """
        # find all audio files (sorted for a deterministic order).
        audio_files = sorted(find_files(root_dir, audio_query))
        # first-dimension length of every file; note every file is loaded once here.
        audio_lengths = [audio_load_fn(f).shape[0] for f in audio_files]

        # assert the number of files
        assert len(audio_files) != 0, f"Not found any mel files in ${root_dir}."

        # derive utterance ids by stripping the query suffix from the file name.
        # NOTE(review): utt_ids is only bound for ".npy" queries; any other
        # query would raise NameError below — confirm this is intended.
        if ".npy" in audio_query:
            suffix = audio_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in audio_files]

        # set global params
        self.utt_ids = utt_ids
        self.audio_files = audio_files
        self.audio_lengths = audio_lengths
        self.audio_load_fn = audio_load_fn
        self.audio_length_threshold = audio_length_threshold

    def get_args(self):
        """Return args for the generator function."""
        return [self.utt_ids]

    def generator(self, utt_ids):
        """Yield one dict per utterance; audio is loaded lazily per element."""
        for i, utt_id in enumerate(utt_ids):
            audio_file = self.audio_files[i]
            audio = self.audio_load_fn(audio_file)
            audio_length = self.audio_lengths[i]

            items = {"utt_ids": utt_id, "audios": audio, "audio_lengths": audio_length}

            yield items

    def get_output_dtypes(self):
        """Return output dtypes for each element from the generator."""
        # NOTE(review): lengths are ints but declared tf.float32 here, while
        # MelDataset uses tf.int32 — confirm whether this is deliberate.
        output_types = {
            "utt_ids": tf.string,
            "audios": tf.float32,
            "audio_lengths": tf.float32,
        }
        return output_types

    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function.

        Filters out clips at or below audio_length_threshold, optionally
        caches/shuffles, then pads and batches. Order of the pipeline stages
        matters (filter before cache; cache before shuffle).
        """
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )

        datasets = datasets.filter(
            lambda x: x["audio_lengths"] > self.audio_length_threshold
        )

        if allow_cache:
            datasets = datasets.cache()

        if is_shuffle:
            # buffer covers the whole dataset, i.e. a full uniform shuffle
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )

        # define padded shapes ([None] lets each batch pad to its longest clip)
        padded_shapes = {
            "utt_ids": [],
            "audios": [None],
            "audio_lengths": [],
        }

        datasets = datasets.padded_batch(batch_size, padded_shapes=padded_shapes)
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets

    def get_len_dataset(self):
        """Return number of samples in the dataset."""
        return len(self.utt_ids)

    def __name__(self):
        # NOTE(review): defining __name__ as an instance method is unusual
        # (it shadows no protocol); presumably used for logging — confirm.
        return "AudioDataset"
| 4,007 | 30.069767 | 87 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/datasets/abstract_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Abstract Dataset modules."""
import abc
import tensorflow as tf
class AbstractDataset(metaclass=abc.ABCMeta):
    """Abstract Dataset module for Dataset Loader.

    Subclasses describe how to enumerate samples (``generator`` /
    ``get_args``) and their dtypes (``get_output_dtypes``); ``create`` turns
    that description into a ``tf.data.Dataset`` pipeline.
    """

    @abc.abstractmethod
    def get_args(self):
        """Return args for generator function."""
        pass

    @abc.abstractmethod
    def generator(self):
        """Generator function, should have args from get_args function."""
        pass

    @abc.abstractmethod
    def get_output_dtypes(self):
        """Return output dtypes for each element from generator."""
        pass

    @abc.abstractmethod
    def get_len_dataset(self):
        """Return number of samples on dataset."""
        pass

    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function.

        Args:
            allow_cache (bool): cache elements in memory after the first pass.
            batch_size (int): samples per batch; > 1 requires a map_fn.
            is_shuffle (bool): shuffle with a buffer covering the whole dataset.
            map_fn (callable): per-element transform applied before batching.
            reshuffle_each_iteration (bool): reshuffle at every epoch.

        Returns:
            tf.data.Dataset yielding (batched) generator elements.
        """
        output_types = self.get_output_dtypes()
        # NOTE: the parentheses around self.get_args() do not form a tuple;
        # get_args() already returns a list of generator arguments.
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )

        if allow_cache:
            datasets = datasets.cache()

        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )

        if batch_size > 1 and map_fn is None:
            raise ValueError("map function must define when batch_size > 1.")

        if map_fn is not None:
            datasets = datasets.map(map_fn, tf.data.experimental.AUTOTUNE)

        datasets = datasets.batch(batch_size)
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)

        return datasets
| 2,320 | 28.75641 | 77 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/datasets/mel_dataset.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset modules."""
import logging
import os
import numpy as np
import tensorflow as tf
from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.utils import find_files
class MelDataset(AbstractDataset):
    """Tensorflow compatible mel dataset.

    Yields dicts with keys ``utt_ids``, ``mels`` and ``mel_lengths``.
    """

    def __init__(
        self,
        root_dir,
        mel_query="*-raw-feats.h5",
        mel_load_fn=np.load,
        mel_length_threshold=0,
    ):
        """Initialize dataset.

        Args:
            root_dir (str): Root directory including dumped files.
            mel_query (str): Query to find feature files in root_dir.
            mel_load_fn (func): Function to load feature file.
            mel_length_threshold (int): Threshold to remove short feature files.
        """
        # NOTE(review): the default query targets .h5 files but the default
        # loader is np.load — callers presumably always pass an .npy query;
        # confirm the default pair is ever used together.
        # find all of mel files.
        mel_files = sorted(find_files(root_dir, mel_query))
        # number of frames (first dim) per file; loads every file once up front.
        mel_lengths = [mel_load_fn(f).shape[0] for f in mel_files]

        # assert the number of files
        assert len(mel_files) != 0, f"Not found any mel files in ${root_dir}."

        # derive utterance ids by stripping the query suffix; only bound for
        # ".npy" queries (any other query raises NameError below).
        if ".npy" in mel_query:
            suffix = mel_query[1:]
            utt_ids = [os.path.basename(f).replace(suffix, "") for f in mel_files]

        # set global params
        self.utt_ids = utt_ids
        self.mel_files = mel_files
        self.mel_lengths = mel_lengths
        self.mel_load_fn = mel_load_fn
        self.mel_length_threshold = mel_length_threshold

    def get_args(self):
        """Return args for the generator function."""
        return [self.utt_ids]

    def generator(self, utt_ids):
        """Yield one dict per utterance; mel is loaded lazily per element."""
        for i, utt_id in enumerate(utt_ids):
            mel_file = self.mel_files[i]
            mel = self.mel_load_fn(mel_file)
            mel_length = self.mel_lengths[i]

            items = {"utt_ids": utt_id, "mels": mel, "mel_lengths": mel_length}

            yield items

    def get_output_dtypes(self):
        """Return output dtypes for each element from the generator."""
        output_types = {
            "utt_ids": tf.string,
            "mels": tf.float32,
            "mel_lengths": tf.int32,
        }
        return output_types

    def create(
        self,
        allow_cache=False,
        batch_size=1,
        is_shuffle=False,
        map_fn=None,
        reshuffle_each_iteration=True,
    ):
        """Create tf.dataset function.

        Filters out utterances at or below mel_length_threshold, optionally
        caches/shuffles, then pads and batches.
        """
        output_types = self.get_output_dtypes()
        datasets = tf.data.Dataset.from_generator(
            self.generator, output_types=output_types, args=(self.get_args())
        )

        datasets = datasets.filter(
            lambda x: x["mel_lengths"] > self.mel_length_threshold
        )

        if allow_cache:
            datasets = datasets.cache()

        if is_shuffle:
            datasets = datasets.shuffle(
                self.get_len_dataset(),
                reshuffle_each_iteration=reshuffle_each_iteration,
            )

        # define padded shapes
        # NOTE(review): mel bin count is hard-coded to 80 here — presumably
        # matches the preprocessing config; confirm before changing num_mels.
        padded_shapes = {
            "utt_ids": [],
            "mels": [None, 80],
            "mel_lengths": [],
        }

        datasets = datasets.padded_batch(batch_size, padded_shapes=padded_shapes)
        datasets = datasets.prefetch(tf.data.experimental.AUTOTUNE)
        return datasets

    def get_len_dataset(self):
        """Return number of samples in the dataset."""
        return len(self.utt_ids)

    def __name__(self):
        return "MelDataset"
| 3,847 | 29.0625 | 82 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/datasets/__init__.py | from tensorflow_tts.datasets.abstract_dataset import AbstractDataset
from tensorflow_tts.datasets.audio_dataset import AudioDataset
from tensorflow_tts.datasets.mel_dataset import MelDataset
| 191 | 47 | 68 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/parallel_wavegan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ParallelWaveGAN Config object."""
from tensorflow_tts.configs import BaseConfig
class ParallelWaveGANGeneratorConfig(BaseConfig):
    """Configuration container for the ParallelWaveGAN generator.

    All constructor arguments are stored verbatim as attributes.
    """

    def __init__(
        self,
        out_channels=1,
        kernel_size=3,
        n_layers=30,
        stacks=3,
        residual_channels=64,
        gate_channels=128,
        skip_channels=64,
        aux_channels=80,
        aux_context_window=2,
        dropout_rate=0.0,
        use_bias=True,
        use_causal_conv=False,
        upsample_conditional_features=True,
        upsample_params={"upsample_scales": [4, 4, 4, 4]},
        initializer_seed=42,
        **kwargs,
    ):
        """Init parameters for ParallelWaveGAN Generator model.

        NOTE(review): `upsample_params` uses a mutable default dict that is
        shared across instances that rely on the default — mutating it on one
        config would affect the others.
        """
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.stacks = stacks
        self.residual_channels = residual_channels
        self.gate_channels = gate_channels
        self.skip_channels = skip_channels
        self.aux_channels = aux_channels
        self.aux_context_window = aux_context_window
        self.dropout_rate = dropout_rate
        self.use_bias = use_bias
        self.use_causal_conv = use_causal_conv
        self.upsample_conditional_features = upsample_conditional_features
        self.upsample_params = upsample_params
        self.initializer_seed = initializer_seed
class ParallelWaveGANDiscriminatorConfig(object):
    """Configuration container for the ParallelWaveGAN discriminator.

    NOTE(review): unlike the generator config this subclasses ``object``
    rather than ``BaseConfig`` (so no ``save_pretrained``) — confirm whether
    that is intentional.
    """

    def __init__(
        self,
        out_channels=1,
        kernel_size=3,
        n_layers=10,
        conv_channels=64,
        use_bias=True,
        dilation_factor=1,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        initializer_seed=42,
        apply_sigmoid_at_last=False,
        **kwargs,
    ):
        """Init parameters for ParallelWaveGAN Discriminator model.

        All arguments are stored verbatim as attributes.
        """
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.conv_channels = conv_channels
        self.use_bias = use_bias
        self.dilation_factor = dilation_factor
        self.nonlinear_activation = nonlinear_activation
        self.nonlinear_activation_params = nonlinear_activation_params
        self.initializer_seed = initializer_seed
        self.apply_sigmoid_at_last = apply_sigmoid_at_last
| 3,037 | 33.134831 | 74 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/base_config.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Config for all config."""
import abc
import yaml
import os
from tensorflow_tts.utils.utils import CONFIG_FILE_NAME
class BaseConfig(abc.ABC):
    """Common base for model configuration objects.

    Holds the raw config dict (via ``set_config_params``) and can serialize
    it back to YAML (via ``save_pretrained``).
    """

    def set_config_params(self, config_params):
        """Remember the raw configuration dict for later serialization."""
        self.config_params = config_params

    def save_pretrained(self, saved_path):
        """Dump the stored config params as YAML under ``saved_path``."""
        os.makedirs(saved_path, exist_ok=True)
        target = os.path.join(saved_path, CONFIG_FILE_NAME)
        with open(target, "w") as config_file:
            yaml.dump(self.config_params, config_file, Dumper=yaml.Dumper)
| 1,121 | 33 | 75 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MelGAN Config object."""
from tensorflow_tts.configs import BaseConfig
class MelGANGeneratorConfig(BaseConfig):
    """Configuration container for the MelGAN generator.

    All constructor arguments are stored verbatim as attributes.
    """

    def __init__(
        self,
        out_channels=1,
        kernel_size=7,
        filters=512,
        use_bias=True,
        upsample_scales=[8, 8, 2, 2],
        stack_kernel_size=3,
        stacks=3,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        padding_type="REFLECT",
        use_final_nolinear_activation=True,
        is_weight_norm=True,
        initializer_seed=42,
        **kwargs
    ):
        """Init parameters for MelGAN Generator model.

        NOTE(review): `upsample_scales` and `nonlinear_activation_params` use
        mutable defaults shared between instances that rely on them.
        """
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.filters = filters
        self.use_bias = use_bias
        self.upsample_scales = upsample_scales
        self.stack_kernel_size = stack_kernel_size
        self.stacks = stacks
        self.nonlinear_activation = nonlinear_activation
        self.nonlinear_activation_params = nonlinear_activation_params
        self.padding_type = padding_type
        self.use_final_nolinear_activation = use_final_nolinear_activation
        self.is_weight_norm = is_weight_norm
        self.initializer_seed = initializer_seed
class MelGANDiscriminatorConfig(object):
    """Configuration container for the (multi-scale) MelGAN discriminator.

    NOTE(review): subclasses ``object`` rather than ``BaseConfig`` (no
    ``save_pretrained``), unlike the generator config — confirm intentional.
    """

    def __init__(
        self,
        out_channels=1,
        scales=3,
        downsample_pooling="AveragePooling1D",
        downsample_pooling_params={"pool_size": 4, "strides": 2,},
        kernel_sizes=[5, 3],
        filters=16,
        max_downsample_filters=1024,
        use_bias=True,
        downsample_scales=[4, 4, 4, 4],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        padding_type="REFLECT",
        is_weight_norm=True,
        initializer_seed=42,
        **kwargs
    ):
        """Init parameters for MelGAN Discriminator model.

        All arguments are stored verbatim as attributes; several defaults are
        mutable objects shared across instances relying on them.
        """
        self.out_channels = out_channels
        self.scales = scales
        self.downsample_pooling = downsample_pooling
        self.downsample_pooling_params = downsample_pooling_params
        self.kernel_sizes = kernel_sizes
        self.filters = filters
        self.max_downsample_filters = max_downsample_filters
        self.use_bias = use_bias
        self.downsample_scales = downsample_scales
        self.nonlinear_activation = nonlinear_activation
        self.nonlinear_activation_params = nonlinear_activation_params
        self.padding_type = padding_type
        self.is_weight_norm = is_weight_norm
        self.initializer_seed = initializer_seed
| 3,279 | 34.268817 | 74 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/tacotron2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tacotron-2 Config object."""
from tensorflow_tts.configs import BaseConfig
from tensorflow_tts.processor.jsut import JSUT_SYMBOLS
from tensorflow_tts.processor.ljspeech import LJSPEECH_SYMBOLS as lj_symbols
from tensorflow_tts.processor.kss import KSS_SYMBOLS as kss_symbols
from tensorflow_tts.processor.baker import BAKER_SYMBOLS as bk_symbols
from tensorflow_tts.processor.libritts import LIBRITTS_SYMBOLS as lbri_symbols
from tensorflow_tts.processor.ljspeechu import LJSPEECH_U_SYMBOLS as lju_symbols
from tensorflow_tts.processor.synpaflex import SYNPAFLEX_SYMBOLS as synpaflex_symbols
from tensorflow_tts.processor.jsut import JSUT_SYMBOLS as jsut_symbols
class Tacotron2Config(BaseConfig):
    """Configuration container for the Tacotron-2 model."""

    def __init__(
        self,
        dataset="ljspeech",
        vocab_size=len(lj_symbols),
        embedding_hidden_size=512,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        embedding_dropout_prob=0.1,
        n_speakers=5,
        n_conv_encoder=3,
        encoder_conv_filters=512,
        encoder_conv_kernel_sizes=5,
        encoder_conv_activation="mish",
        encoder_conv_dropout_rate=0.5,
        encoder_lstm_units=256,
        reduction_factor=5,
        n_prenet_layers=2,
        prenet_units=256,
        prenet_activation="mish",
        prenet_dropout_rate=0.5,
        n_lstm_decoder=1,
        decoder_lstm_units=1024,
        attention_type="lsa",
        attention_dim=128,
        attention_filters=32,
        attention_kernel=31,
        n_mels=80,
        n_conv_postnet=5,
        postnet_conv_filters=512,
        postnet_conv_kernel_sizes=5,
        postnet_dropout_rate=0.1,
    ):
        """Init parameters for Tacotron-2 model.

        The vocabulary size is resolved from ``dataset``; the explicit
        ``vocab_size`` argument is only honored for "ljspeech" and silently
        ignored for every other dataset.
        """
        # resolve vocabulary size from the dataset's symbol table
        if dataset == "ljspeech":
            self.vocab_size = vocab_size
        elif dataset == "kss":
            self.vocab_size = len(kss_symbols)
        elif dataset == "baker":
            self.vocab_size = len(bk_symbols)
        elif dataset == "libritts":
            self.vocab_size = len(lbri_symbols)
        elif dataset == "ljspeechu":
            self.vocab_size = len(lju_symbols)
        elif dataset == "synpaflex":
            self.vocab_size = len(synpaflex_symbols)
        elif dataset == "jsut":
            self.vocab_size = len(jsut_symbols)
        else:
            raise ValueError("No such dataset: {}".format(dataset))
        # embedding / encoder params
        self.embedding_hidden_size = embedding_hidden_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_dropout_prob = embedding_dropout_prob
        self.n_speakers = n_speakers
        self.n_conv_encoder = n_conv_encoder
        self.encoder_conv_filters = encoder_conv_filters
        self.encoder_conv_kernel_sizes = encoder_conv_kernel_sizes
        self.encoder_conv_activation = encoder_conv_activation
        self.encoder_conv_dropout_rate = encoder_conv_dropout_rate
        self.encoder_lstm_units = encoder_lstm_units

        # decoder param
        self.reduction_factor = reduction_factor
        self.n_prenet_layers = n_prenet_layers
        self.prenet_units = prenet_units
        self.prenet_activation = prenet_activation
        self.prenet_dropout_rate = prenet_dropout_rate
        self.n_lstm_decoder = n_lstm_decoder
        self.decoder_lstm_units = decoder_lstm_units
        self.attention_type = attention_type
        self.attention_dim = attention_dim
        self.attention_filters = attention_filters
        self.attention_kernel = attention_kernel
        self.n_mels = n_mels

        # postnet
        self.n_conv_postnet = n_conv_postnet
        self.postnet_conv_filters = postnet_conv_filters
        self.postnet_conv_kernel_sizes = postnet_conv_kernel_sizes
        self.postnet_dropout_rate = postnet_dropout_rate
| 4,436 | 38.616071 | 85 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/mb_melgan.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-band MelGAN Config object."""
from tensorflow_tts.configs import MelGANDiscriminatorConfig, MelGANGeneratorConfig
class MultiBandMelGANGeneratorConfig(MelGANGeneratorConfig):
    """MelGAN generator config extended with sub-band filter settings.

    Extra keyword args (with defaults): ``subbands`` (4), ``taps`` (62),
    ``cutoff_ratio`` (0.142), ``beta`` (9.0) — presumably the PQMF
    analysis/synthesis filter parameters; confirm against the model code.
    """

    def __init__(self, **kwargs):
        # All kwargs are forwarded to the parent first; MelGANGeneratorConfig
        # accepts **kwargs, so the extra keys pass through harmlessly and are
        # popped here for local attributes.
        super().__init__(**kwargs)
        self.subbands = kwargs.pop("subbands", 4)
        self.taps = kwargs.pop("taps", 62)
        self.cutoff_ratio = kwargs.pop("cutoff_ratio", 0.142)
        self.beta = kwargs.pop("beta", 9.0)
class MultiBandMelGANDiscriminatorConfig(MelGANDiscriminatorConfig):
    """Multi-band MelGAN discriminator config (identical to the MelGAN one)."""

    def __init__(self, **kwargs):
        # No extra parameters; kept as a distinct type for symmetry/naming.
        super().__init__(**kwargs)
| 1,330 | 35.972222 | 83 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/hifigan.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorflowTTS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HifiGAN Config object."""
from tensorflow_tts.configs import BaseConfig
class HifiGANGeneratorConfig(BaseConfig):
    """Configuration container for the HiFi-GAN generator.

    All constructor arguments are stored verbatim as attributes.
    """

    def __init__(
        self,
        out_channels=1,
        kernel_size=7,
        filters=128,
        use_bias=True,
        upsample_scales=[8, 8, 2, 2],
        stacks=3,
        stack_kernel_size=[3, 7, 11],
        stack_dilation_rate=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        padding_type="REFLECT",
        use_final_nolinear_activation=True,
        is_weight_norm=True,
        initializer_seed=42,
        **kwargs
    ):
        """Init parameters for HifiGAN Generator model.

        NOTE(review): several defaults are mutable lists/dicts shared across
        instances relying on them.
        """
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.filters = filters
        self.use_bias = use_bias
        self.upsample_scales = upsample_scales
        self.stacks = stacks
        self.stack_kernel_size = stack_kernel_size
        self.stack_dilation_rate = stack_dilation_rate
        self.nonlinear_activation = nonlinear_activation
        self.nonlinear_activation_params = nonlinear_activation_params
        self.padding_type = padding_type
        self.use_final_nolinear_activation = use_final_nolinear_activation
        self.is_weight_norm = is_weight_norm
        self.initializer_seed = initializer_seed
class HifiGANDiscriminatorConfig(object):
    """Configuration container for the HiFi-GAN (multi-period) discriminator.

    NOTE(review): subclasses ``object`` rather than ``BaseConfig`` (no
    ``save_pretrained``), unlike the generator config — confirm intentional.
    """

    def __init__(
        self,
        out_channels=1,
        period_scales=[2, 3, 5, 7, 11],
        n_layers=5,
        kernel_size=5,
        strides=3,
        filters=8,
        filter_scales=4,
        max_filters=1024,
        nonlinear_activation="LeakyReLU",
        nonlinear_activation_params={"alpha": 0.2},
        is_weight_norm=True,
        initializer_seed=42,
        **kwargs
    ):
        """Init parameters for HifiGAN Discriminator model.

        All arguments are stored verbatim as attributes.
        """
        self.out_channels = out_channels
        self.period_scales = period_scales
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.strides = strides
        self.filters = filters
        self.filter_scales = filter_scales
        self.max_filters = max_filters
        self.nonlinear_activation = nonlinear_activation
        self.nonlinear_activation_params = nonlinear_activation_params
        self.is_weight_norm = is_weight_norm
        self.initializer_seed = initializer_seed
| 3,114 | 33.230769 | 74 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/__init__.py | from tensorflow_tts.configs.base_config import BaseConfig
from tensorflow_tts.configs.fastspeech import FastSpeechConfig
from tensorflow_tts.configs.fastspeech2 import FastSpeech2Config
from tensorflow_tts.configs.melgan import (
MelGANDiscriminatorConfig,
MelGANGeneratorConfig,
)
from tensorflow_tts.configs.mb_melgan import (
MultiBandMelGANDiscriminatorConfig,
MultiBandMelGANGeneratorConfig,
)
from tensorflow_tts.configs.hifigan import (
HifiGANGeneratorConfig,
HifiGANDiscriminatorConfig,
)
from tensorflow_tts.configs.tacotron2 import Tacotron2Config
from tensorflow_tts.configs.parallel_wavegan import ParallelWaveGANGeneratorConfig
from tensorflow_tts.configs.parallel_wavegan import ParallelWaveGANDiscriminatorConfig
| 753 | 38.684211 | 86 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/fastspeech2.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FastSpeech2 Config object."""
from tensorflow_tts.configs import FastSpeechConfig
class FastSpeech2Config(FastSpeechConfig):
    """FastSpeech config extended with variant (f0/energy) predictor settings."""

    def __init__(
        self,
        variant_prediction_num_conv_layers=2,
        variant_kernel_size=9,
        variant_dropout_rate=0.5,
        variant_predictor_filter=256,
        variant_predictor_kernel_size=3,
        variant_predictor_dropout_rate=0.5,
        **kwargs
    ):
        """Init parameters for the FastSpeech2 variant predictors.

        Remaining kwargs are forwarded to FastSpeechConfig.
        """
        super().__init__(**kwargs)
        self.variant_prediction_num_conv_layers = variant_prediction_num_conv_layers
        # NOTE(review): variant_kernel_size / variant_dropout_rate were
        # previously accepted but silently dropped; store them so the values
        # are at least introspectable on the config object.
        self.variant_kernel_size = variant_kernel_size
        self.variant_dropout_rate = variant_dropout_rate
        self.variant_predictor_kernel_size = variant_predictor_kernel_size
        self.variant_predictor_dropout_rate = variant_predictor_dropout_rate
        self.variant_predictor_filter = variant_predictor_filter
| 1,416 | 35.333333 | 84 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/configs/fastspeech.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FastSpeech Config object."""
import collections
from tensorflow_tts.configs import BaseConfig
from tensorflow_tts.processor.ljspeech import LJSPEECH_SYMBOLS as lj_symbols
from tensorflow_tts.processor.kss import KSS_SYMBOLS as kss_symbols
from tensorflow_tts.processor.baker import BAKER_SYMBOLS as bk_symbols
from tensorflow_tts.processor.libritts import LIBRITTS_SYMBOLS as lbri_symbols
from tensorflow_tts.processor.jsut import JSUT_SYMBOLS as jsut_symbols
# Hyper-parameters shared by one self-attention stack (used for both the
# FastSpeech encoder and decoder).
SelfAttentionParams = collections.namedtuple(
    "SelfAttentionParams",
    "n_speakers hidden_size num_hidden_layers num_attention_heads "
    "attention_head_size intermediate_size intermediate_kernel_size "
    "hidden_act output_attentions output_hidden_states initializer_range "
    "hidden_dropout_prob attention_probs_dropout_prob layer_norm_eps "
    "max_position_embeddings",
)
class FastSpeechConfig(BaseConfig):
    """Configuration container for the FastSpeech model."""

    def __init__(
        self,
        dataset="ljspeech",
        vocab_size=len(lj_symbols),
        n_speakers=1,
        encoder_hidden_size=384,
        encoder_num_hidden_layers=4,
        encoder_num_attention_heads=2,
        encoder_attention_head_size=192,
        encoder_intermediate_size=1024,
        encoder_intermediate_kernel_size=3,
        encoder_hidden_act="mish",
        decoder_hidden_size=384,
        decoder_num_hidden_layers=4,
        decoder_num_attention_heads=2,
        decoder_attention_head_size=192,
        decoder_intermediate_size=1024,
        decoder_intermediate_kernel_size=3,
        decoder_hidden_act="mish",
        output_attentions=True,
        output_hidden_states=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        max_position_embeddings=2048,
        num_duration_conv_layers=2,
        duration_predictor_filters=256,
        duration_predictor_kernel_sizes=3,
        num_mels=80,
        duration_predictor_dropout_probs=0.1,
        n_conv_postnet=5,
        postnet_conv_filters=512,
        postnet_conv_kernel_sizes=5,
        postnet_dropout_rate=0.1,
        **kwargs
    ):
        """Init parameters for Fastspeech model.

        The vocabulary size is resolved from ``dataset``; the explicit
        ``vocab_size`` argument is only honored for "ljspeech".
        """
        # resolve vocabulary size from the dataset's symbol table
        # NOTE(review): unlike Tacotron2Config, "ljspeechu" and "synpaflex"
        # are not accepted here — confirm whether FastSpeech should support
        # those datasets too.
        if dataset == "ljspeech":
            self.vocab_size = vocab_size
        elif dataset == "kss":
            self.vocab_size = len(kss_symbols)
        elif dataset == "baker":
            self.vocab_size = len(bk_symbols)
        elif dataset == "libritts":
            self.vocab_size = len(lbri_symbols)
        elif dataset == "jsut":
            self.vocab_size = len(jsut_symbols)
        else:
            raise ValueError("No such dataset: {}".format(dataset))

        self.initializer_range = initializer_range
        self.max_position_embeddings = max_position_embeddings
        self.n_speakers = n_speakers
        self.layer_norm_eps = layer_norm_eps

        # encoder params
        self.encoder_self_attention_params = SelfAttentionParams(
            n_speakers=n_speakers,
            hidden_size=encoder_hidden_size,
            num_hidden_layers=encoder_num_hidden_layers,
            num_attention_heads=encoder_num_attention_heads,
            attention_head_size=encoder_attention_head_size,
            hidden_act=encoder_hidden_act,
            intermediate_size=encoder_intermediate_size,
            intermediate_kernel_size=encoder_intermediate_kernel_size,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            initializer_range=initializer_range,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            layer_norm_eps=layer_norm_eps,
            max_position_embeddings=max_position_embeddings,
        )

        # decoder params (same structure as the encoder, separate sizes)
        self.decoder_self_attention_params = SelfAttentionParams(
            n_speakers=n_speakers,
            hidden_size=decoder_hidden_size,
            num_hidden_layers=decoder_num_hidden_layers,
            num_attention_heads=decoder_num_attention_heads,
            attention_head_size=decoder_attention_head_size,
            hidden_act=decoder_hidden_act,
            intermediate_size=decoder_intermediate_size,
            intermediate_kernel_size=decoder_intermediate_kernel_size,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            initializer_range=initializer_range,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            layer_norm_eps=layer_norm_eps,
            max_position_embeddings=max_position_embeddings,
        )

        # duration predictor
        self.duration_predictor_dropout_probs = duration_predictor_dropout_probs
        self.num_duration_conv_layers = num_duration_conv_layers
        self.duration_predictor_filters = duration_predictor_filters
        self.duration_predictor_kernel_sizes = duration_predictor_kernel_sizes
        self.num_mels = num_mels

        # postnet
        self.n_conv_postnet = n_conv_postnet
        self.postnet_conv_filters = postnet_conv_filters
        self.postnet_conv_kernel_sizes = postnet_conv_kernel_sizes
        self.postnet_dropout_rate = postnet_dropout_rate
| 6,082 | 37.745223 | 80 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/optimizers/gradient_accumulate.py | """Gradient Accummlate for training TF2 custom training loop.
Copy from https://github.com/OpenNMT/OpenNMT-tf/blob/master/opennmt/optimizers/utils.py.
"""
import re
import tensorflow as tf
class GradientAccumulator(object):
    """Gradient accumulation utility.
    When used with a distribution strategy, the accumulator should be called in a
    replica context. Gradients will be accumulated locally on each replica and
    without synchronization. Users should then call ``.gradients``, scale the
    gradients if required, and pass the result to ``apply_gradients``.
    """
    # We use the ON_READ synchronization policy so that no synchronization is
    # performed on assignment. To get the value, we call .value() which returns the
    # value on the current replica without synchronization.
    def __init__(self):
        """Initializes the accumulator."""
        # One tf.Variable per model gradient; created lazily on first __call__.
        self._gradients = []
        # Step counter variable; created lazily by the `step` property.
        self._accum_steps = None
    @property
    def step(self):
        """Number of accumulated steps."""
        # Lazily create the counter so variable creation happens in the right
        # replica/strategy context.
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()
    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError(
                "The accumulator should be called first to initialize the gradients"
            )
        # None entries (variables with no gradient) are passed through as None.
        return list(
            gradient.value() if gradient is not None else gradient
            for gradient in self._gradients
        )
    def __call__(self, gradients):
        """Accumulates :obj:`gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            # First call: allocate one zero-initialized accumulator variable
            # per gradient (keeping None slots as None).
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        # The caller must always pass the same number of gradients.
        if len(gradients) != len(self._gradients):
            raise ValueError(
                "Expected %s gradients, but got %d"
                % (len(self._gradients), len(gradients))
            )
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                # read_value=False avoids a round-trip read after the add.
                accum_gradient.assign_add(gradient, read_value=False)
        self._accum_steps.assign_add(1)
    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient), read_value=False)
| 3,193 | 34.88764 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/optimizers/adamweightdecay.py | # -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AdamW for training self-attention."""
import re
import tensorflow as tf
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule with a polynomial warmup phase.

    While ``step < warmup_steps`` the rate is
    ``initial_learning_rate * (step / warmup_steps) ** power``; afterwards the
    wrapped ``decay_schedule_fn`` takes over.
    """

    def __init__(
        self,
        initial_learning_rate,
        decay_schedule_fn,
        warmup_steps,
        power=1.0,
        name=None,
    ):
        super(WarmUp, self).__init__()
        self.name = name
        self.initial_learning_rate = initial_learning_rate
        self.decay_schedule_fn = decay_schedule_fn
        self.warmup_steps = warmup_steps
        self.power = power

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Ramp up polynomially from 0 toward the initial learning rate,
            # i.e. global_step/num_warmup_steps * init_lr when power == 1.
            step_float = tf.cast(step, tf.float32)
            warmup_float = tf.cast(self.warmup_steps, tf.float32)
            fraction_done = step_float / warmup_float
            warmup_rate = self.initial_learning_rate * tf.math.pow(
                fraction_done, self.power
            )
            # Select warmup vs. decay schedule inside the graph.
            return tf.cond(
                step_float < warmup_float,
                lambda: warmup_rate,
                lambda: self.decay_schedule_fn(step),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
class AdamWeightDecay(tf.keras.optimizers.Adam):
    """Adam enables L2 weight decay and clip_by_global_norm on gradients.
    Just adding the square of the weights to the loss function is *not* the
    correct way of using L2 regularization/weight decay with Adam, since that will
    interact with the m and v parameters in strange ways.
    Instead we want to decay the weights in a manner that doesn't interact with
    the m/v parameters. This is equivalent to adding the square of the weights to
    the loss with plain (non-momentum) SGD.
    """
    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs
    ):
        super(AdamWeightDecay, self).__init__(
            learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs
        )
        # Decoupled weight-decay factor; 0.0 disables decay entirely.
        self.weight_decay_rate = weight_decay_rate
        # Optional regex lists controlling which variables (by name) decay;
        # include wins over exclude (see _do_use_weight_decay).
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay
    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(
            config, custom_objects=custom_objects
        )
    def _prepare_local(self, var_device, var_dtype, apply_state):
        # Stash the decay rate as a constant so _decay_weights_op can read it.
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )
    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Apply decoupled weight decay: var -= lr * decay_rate * var.
        # Returns a no-op for variables excluded from decay.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()
    def apply_gradients(self, grads_and_vars, clip_norm=0.5, **kwargs):
        # Clip by global norm before delegating to the standard Adam update.
        grads, tvars = list(zip(*grads_and_vars))
        (grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), **kwargs)
    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        # Cache per-(device, dtype) coefficients in apply_state.
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], dict(apply_state=apply_state)
    def _resource_apply_dense(self, grad, var, apply_state=None):
        # Run the decay op first; the control dependency guarantees it happens
        # before the Adam moment update.
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(
                grad, var, **kwargs
            )
    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        # Sparse counterpart of _resource_apply_dense.
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(
                grad, var, indices, **kwargs
            )
    def get_config(self):
        config = super(AdamWeightDecay, self).get_config()
        config.update(
            {"weight_decay_rate": self.weight_decay_rate,}
        )
        return config
    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        # Explicit include patterns force decay on...
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        # ...then exclude patterns opt matching variables out.
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
| 6,854 | 37.511236 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/optimizers/__init__.py | from tensorflow_tts.optimizers.adamweightdecay import AdamWeightDecay, WarmUp
from tensorflow_tts.optimizers.gradient_accumulate import GradientAccumulator
| 156 | 51.333333 | 77 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/number_norm.py | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Number norm module."""
import re
import inflect
_inflect = inflect.engine()
_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])")
_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)")
_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)")
_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)")
_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)")
_number_re = re.compile(r"[0-9]+")
def _remove_commas(m):
return m.group(1).replace(",", "")
def _expand_decimal_point(m):
return m.group(1).replace(".", " point ")
def _expand_dollars(m):
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
return match + " dollars" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = "dollar" if dollars == 1 else "dollars"
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = "dollar" if dollars == 1 else "dollars"
return "%s %s" % (dollars, dollar_unit)
elif cents:
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s" % (cents, cent_unit)
else:
return "zero dollars"
def _expand_ordinal(m):
    """re.sub callback: spell out an ordinal ('2nd' -> 'second') via inflect."""
    ordinal = m.group(0)
    return _inflect.number_to_words(ordinal)
def _expand_number(m):
    """re.sub callback: spell out a plain integer.

    Numbers strictly between 1000 and 3000 are read as years
    (e.g. 1984 -> 'nineteen eighty-four'), with special cases for 2000 and
    2001-2009; everything else uses the plain inflect reading.
    """
    num = int(m.group(0))
    if not (1000 < num < 3000):
        return _inflect.number_to_words(num, andword="")
    if num == 2000:
        return "two thousand"
    if 2000 < num < 2010:
        return "two thousand " + _inflect.number_to_words(num % 100)
    if num % 100 == 0:
        return _inflect.number_to_words(num // 100) + " hundred"
    # Year-style reading: split into two-digit groups, e.g. 'nineteen oh-four'.
    return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(
        ", ", " "
    )
def normalize_numbers(text):
    """Expand numbers, currency amounts, decimals and ordinals in *text* into words."""
    # Order matters: commas are removed first so the currency/number passes see
    # clean digit strings, and currency/decimal handling must run before the
    # generic number pass.
    substitutions = (
        (_comma_number_re, _remove_commas),
        (_pounds_re, r"\1 pounds"),
        (_dollars_re, _expand_dollars),
        (_decimal_number_re, _expand_decimal_point),
        (_ordinal_re, _expand_ordinal),
        (_number_re, _expand_number),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)
    return text
| 3,408 | 34.884211 | 79 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/korean.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlowTTS Team, Jaehyoung Kim(@crux153) and Taehoon Kim(@carpedm20)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Code based on https://github.com/carpedm20/multi-speaker-tacotron-tensorflow
"""Korean related helpers."""
import ast
import json
import os
import re
from jamo import h2j, hangul_to_jamo, j2h, jamo_to_hcj
etc_dictionary = {
"2 30대": "이삼십대",
"20~30대": "이삼십대",
"20, 30대": "이십대 삼십대",
"1+1": "원플러스원",
"3에서 6개월인": "3개월에서 육개월인",
}
english_dictionary = {
"Devsisters": "데브시스터즈",
"track": "트랙",
# krbook
"LA": "엘에이",
"LG": "엘지",
"KOREA": "코리아",
"JSA": "제이에스에이",
"PGA": "피지에이",
"GA": "지에이",
"idol": "아이돌",
"KTX": "케이티엑스",
"AC": "에이씨",
"DVD": "디비디",
"US": "유에스",
"CNN": "씨엔엔",
"LPGA": "엘피지에이",
"P": "피",
"L": "엘",
"T": "티",
"B": "비",
"C": "씨",
"BIFF": "비아이에프에프",
"GV": "지비",
# JTBC
"IT": "아이티",
"IQ": "아이큐",
"JTBC": "제이티비씨",
"trickle down effect": "트리클 다운 이펙트",
"trickle up effect": "트리클 업 이펙트",
"down": "다운",
"up": "업",
"FCK": "에프씨케이",
"AP": "에이피",
"WHERETHEWILDTHINGSARE": "",
"Rashomon Effect": "",
"O": "오",
"OO": "오오",
"B": "비",
"GDP": "지디피",
"CIPA": "씨아이피에이",
"YS": "와이에스",
"Y": "와이",
"S": "에스",
"JTBC": "제이티비씨",
"PC": "피씨",
"bill": "빌",
"Halmuny": "하모니", #####
"X": "엑스",
"SNS": "에스엔에스",
"ability": "어빌리티",
"shy": "",
"CCTV": "씨씨티비",
"IT": "아이티",
"the tenth man": "더 텐쓰 맨", ####
"L": "엘",
"PC": "피씨",
"YSDJJPMB": "", ########
"Content Attitude Timing": "컨텐트 애티튜드 타이밍",
"CAT": "캣",
"IS": "아이에스",
"K": "케이",
"Y": "와이",
"KDI": "케이디아이",
"DOC": "디오씨",
"CIA": "씨아이에이",
"PBS": "피비에스",
"D": "디",
"PPropertyPositionPowerPrisonP" "S": "에스",
"francisco": "프란시스코",
"I": "아이",
"III": "아이아이", ######
"No joke": "노 조크",
"BBK": "비비케이",
"LA": "엘에이",
"Don": "",
"t worry be happy": " 워리 비 해피",
"NO": "엔오", #####
"it was our sky": "잇 워즈 아워 스카이",
"it is our sky": "잇 이즈 아워 스카이", ####
"NEIS": "엔이아이에스", #####
"IMF": "아이엠에프",
"apology": "어폴로지",
"humble": "험블",
"M": "엠",
"Nowhere Man": "노웨어 맨",
"The Tenth Man": "더 텐쓰 맨",
"PBS": "피비에스",
"BBC": "비비씨",
"MRJ": "엠알제이",
"CCTV": "씨씨티비",
"Pick me up": "픽 미 업",
"DNA": "디엔에이",
"UN": "유엔",
"STOP": "스탑", #####
"PRESS": "프레스", #####
"not to be": "낫 투비",
"Denial": "디나이얼",
"G": "지",
"IMF": "아이엠에프",
"GDP": "지디피",
"JTBC": "제이티비씨",
"Time flies like an arrow": "타임 플라이즈 라이크 언 애로우",
"DDT": "디디티",
"AI": "에이아이",
"Z": "제트",
"OECD": "오이씨디",
"N": "앤",
"A": "에이",
"MB": "엠비",
"EH": "이에이치",
"IS": "아이에스",
"TV": "티비",
"MIT": "엠아이티",
"KBO": "케이비오",
"I love America": "아이 러브 아메리카",
"SF": "에스에프",
"Q": "큐",
"KFX": "케이에프엑스",
"PM": "피엠",
"Prime Minister": "프라임 미니스터",
"Swordline": "스워드라인",
"TBS": "티비에스",
"DDT": "디디티",
"CS": "씨에스",
"Reflecting Absence": "리플렉팅 앱센스",
"PBS": "피비에스",
"Drum being beaten by everyone": "드럼 빙 비튼 바이 에브리원",
"negative pressure": "네거티브 프레셔",
"F": "에프",
"KIA": "기아",
"FTA": "에프티에이",
"Que sais-je": "",
"UFC": "유에프씨",
"P": "피",
"DJ": "디제이",
"Chaebol": "채벌",
"BBC": "비비씨",
"OECD": "오이씨디",
"BC": "삐씨",
"C": "씨",
"B": "씨",
"KY": "케이와이",
"K": "케이",
"CEO": "씨이오",
"YH": "와이에치",
"IS": "아이에스",
"who are you": "후 얼 유",
"Y": "와이",
"The Devils Advocate": "더 데빌즈 어드보카트",
"YS": "와이에스",
"so sorry": "쏘 쏘리",
"Santa": "산타",
"Big Endian": "빅 엔디안",
"Small Endian": "스몰 엔디안",
"Oh Captain My Captain": "오 캡틴 마이 캡틴",
"AIB": "에이아이비",
"K": "케이",
"PBS": "피비에스",
# IU
"ASMR": "에이에스엠알",
"V": "브이",
"PD": "피디",
"CD": "씨디",
"ANR": "에이엔알",
"Twenty Three": "투엔티 쓰리",
"Through The Night": "쓰루 더 나잇",
"MD": "엠디",
}
num_to_kor = {
"0": "영",
"1": "일",
"2": "이",
"3": "삼",
"4": "사",
"5": "오",
"6": "육",
"7": "칠",
"8": "팔",
"9": "구",
}
unit_to_kor1 = {"%": "퍼센트", "cm": "센치미터", "mm": "밀리미터", "km": "킬로미터", "kg": "킬로그람"}
unit_to_kor2 = {"m": "미터"}
upper_to_kor = {
"A": "에이",
"B": "비",
"C": "씨",
"D": "디",
"E": "이",
"F": "에프",
"G": "지",
"H": "에이치",
"I": "아이",
"J": "제이",
"K": "케이",
"L": "엘",
"M": "엠",
"N": "엔",
"O": "오",
"P": "피",
"Q": "큐",
"R": "알",
"S": "에스",
"T": "티",
"U": "유",
"V": "브이",
"W": "더블유",
"X": "엑스",
"Y": "와이",
"Z": "지",
}
"""
초성과 종성은 같아보이지만, 다른 character이다.
'_-!'(),-.:;? ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄋᄌᄍᄎᄏᄐᄑ하ᅢᅣᅤᅥᅦᅧᅨᅩᅪᅫᅬᅭᅮᅯᅰᅱᅲᅳᅴᅵᆨᆩᆪᆫᆬᆭᆮᆯᆰᆱᆲᆳᆴᆵᆶᆷᆸᆹᆺᆻᆼᆽᆾᆿᇀᇁᇂ~'
'_': 0, '-': 7, '!': 2, "'": 3, '(': 4, ')': 5, ',': 6, '.': 8, ':': 9, ';': 10,
'?': 11, ' ': 12, 'ᄀ': 13, 'ᄁ': 14, 'ᄂ': 15, 'ᄃ': 16, 'ᄄ': 17, 'ᄅ': 18, 'ᄆ': 19, 'ᄇ': 20,
'ᄈ': 21, 'ᄉ': 22, 'ᄊ': 23, 'ᄋ': 24, 'ᄌ': 25, 'ᄍ': 26, 'ᄎ': 27, 'ᄏ': 28, 'ᄐ': 29, 'ᄑ': 30,
'ᄒ': 31, 'ᅡ': 32, 'ᅢ': 33, 'ᅣ': 34, 'ᅤ': 35, 'ᅥ': 36, 'ᅦ': 37, 'ᅧ': 38, 'ᅨ': 39, 'ᅩ': 40,
'ᅪ': 41, 'ᅫ': 42, 'ᅬ': 43, 'ᅭ': 44, 'ᅮ': 45, 'ᅯ': 46, 'ᅰ': 47, 'ᅱ': 48, 'ᅲ': 49, 'ᅳ': 50,
'ᅴ': 51, 'ᅵ': 52, 'ᆨ': 53, 'ᆩ': 54, 'ᆪ': 55, 'ᆫ': 56, 'ᆬ': 57, 'ᆭ': 58, 'ᆮ': 59, 'ᆯ': 60,
'ᆰ': 61, 'ᆱ': 62, 'ᆲ': 63, 'ᆳ': 64, 'ᆴ': 65, 'ᆵ': 66, 'ᆶ': 67, 'ᆷ': 68, 'ᆸ': 69, 'ᆹ': 70,
'ᆺ': 71, 'ᆻ': 72, 'ᆼ': 73, 'ᆽ': 74, 'ᆾ': 75, 'ᆿ': 76, 'ᇀ': 77, 'ᇁ': 78, 'ᇂ': 79, '~': 80
"""
_pad = "pad"
_eos = "eos"
_punctuation = "!'(),-.:;? "
_special = "-"
_jamo_leads = [chr(_) for _ in range(0x1100, 0x1113)]
_jamo_vowels = [chr(_) for _ in range(0x1161, 0x1176)]
_jamo_tails = [chr(_) for _ in range(0x11A8, 0x11C3)]
_letters = _jamo_leads + _jamo_vowels + _jamo_tails
symbols = [_pad] + list(_special) + list(_punctuation) + _letters + [_eos]
_symbol_to_id = {c: i for i, c in enumerate(symbols)}
_id_to_symbol = {i: c for i, c in enumerate(symbols)}
quote_checker = """([`"'"“‘])(.+?)([`"'"”’])"""
def is_lead(char):
    # True if `char` is a Hangul leading-consonant jamo (U+1100..U+1112).
    return char in _jamo_leads
def is_vowel(char):
    # True if `char` is a Hangul vowel jamo (U+1161..U+1175).
    return char in _jamo_vowels
def is_tail(char):
    # True if `char` is a Hangul trailing-consonant jamo (U+11A8..U+11C2).
    return char in _jamo_tails
def get_mode(char):
    """Classify a jamo character: 0 = lead, 1 = vowel, 2 = tail, -1 = not a jamo."""
    for mode, predicate in enumerate((is_lead, is_vowel, is_tail)):
        if predicate(char):
            return mode
    return -1
def _get_text_from_candidates(candidates):
    # Collapse a buffered [lead, vowel, tail?] jamo sequence back into text:
    # empty buffer -> "", a single jamo -> its compatibility form (hcj),
    # two or three jamo -> a composed Hangul syllable via j2h.
    if len(candidates) == 0:
        return ""
    elif len(candidates) == 1:
        return jamo_to_hcj(candidates[0])
    else:
        return j2h(**dict(zip(["lead", "vowel", "tail"], candidates)))
def jamo_to_korean(text):
    """Re-compose a jamo sequence into complete Hangul syllables.

    Non-jamo characters are passed through unchanged; buffered jamo are
    flushed whenever a new syllable starts (a lead jamo) or a non-jamo
    character is met.
    """
    text = h2j(text)
    idx = 0
    new_text = ""
    candidates = []  # jamo of the syllable currently being assembled
    while True:
        if idx >= len(text):
            # End of input: flush whatever remains in the buffer.
            new_text += _get_text_from_candidates(candidates)
            break
        char = text[idx]
        mode = get_mode(char)
        if mode == 0:
            # Lead jamo starts a new syllable: flush the previous buffer.
            new_text += _get_text_from_candidates(candidates)
            candidates = [char]
        elif mode == -1:
            # Not a jamo: flush the buffer and copy the character through.
            new_text += _get_text_from_candidates(candidates)
            new_text += char
            candidates = []
        else:
            # Vowel or tail jamo: keep collecting into the current syllable.
            candidates.append(char)
        idx += 1
    return new_text
def compare_sentence_with_jamo(text1, text2):
    """Return True when the two strings differ after jamo decomposition."""
    decomposed_a = h2j(text1)
    decomposed_b = h2j(text2)
    return decomposed_a != decomposed_b
def tokenize(text, as_id=False):
    """Normalize *text* and split it into jamo tokens.

    E.g. '존경하는' -> ['ᄌ', 'ᅩ', 'ᆫ', 'ᄀ', 'ᅧ', 'ᆼ', 'ᄒ', 'ᅡ', 'ᄂ', 'ᅳ', 'ᆫ'].
    When *as_id* is True, each token is mapped to its symbol id instead.
    """
    tokens = list(hangul_to_jamo(normalize(text)))
    if as_id:
        return [_symbol_to_id[token] for token in tokens]
    return tokens
def tokenizer_fn(iterator):
    # Lazily yield jamo tokens for every string produced by `iterator`.
    return (token for x in iterator for token in tokenize(x, as_id=False))
def normalize(text):
    """Full Korean text-normalization pipeline.

    Strips the text, drops parenthesised day counts ("(3일)") and
    parenthesised CJK/Hanja annotations, expands dictionary entries,
    transliterates known English words, spells out remaining all-uppercase
    acronyms letter by letter, re-quotes quoted passages sentence by
    sentence, and finally converts numbers into Korean words.
    """
    text = text.strip()
    # Raw strings: "\(" and "\d" are invalid escape sequences in a plain str
    # literal (SyntaxWarning on modern Python); the regexes are unchanged.
    text = re.sub(r"\(\d+일\)", "", text)
    text = re.sub(r"\([⺀-⺙⺛-⻳⼀-⿕々〇〡-〩〸-〺〻㐀-䶵一-鿃豈-鶴侮-頻並-龎]+\)", "", text)
    text = normalize_with_dictionary(text, etc_dictionary)
    text = normalize_english(text)
    text = re.sub("[a-zA-Z]+", normalize_upper, text)
    text = normalize_quote(text)
    text = normalize_number(text)
    return text
def normalize_with_dictionary(text, dic):
    """Replace every occurrence of a key of *dic* in *text* with its value.

    Keys are matched literally (regex-escaped) in one combined pass; when no
    key occurs in the text, it is returned unchanged.
    """
    if not any(key in text for key in dic):
        return text
    combined = re.compile("|".join(re.escape(key) for key in dic))
    return combined.sub(lambda match: dic[match.group()], text)
def normalize_english(text):
    """Replace known English words/acronyms with their Korean transliteration."""

    def replace(match):
        word = match.group()
        # Words not present in the dictionary are left untouched.
        return english_dictionary.get(word, word)

    return re.sub("([A-Za-z]+)", replace, text)
def normalize_upper(text):
    """re.sub callback: spell out an all-uppercase token letter by letter."""
    token = text.group(0)
    if not token.isupper():
        # Mixed- or lower-case words are left as-is.
        return token
    return "".join(upper_to_kor[char] for char in token)
def normalize_quote(text):
    """Split each quoted span into sentences and re-quote every sentence."""
    def fn(found_text):
        from nltk import sent_tokenize  # imported lazily: NLTK doesn't play well with multiprocessing
        found_text = found_text.group()
        # Drop the surrounding quote characters before sentence-splitting.
        unquoted_text = found_text[1:-1]
        sentences = sent_tokenize(unquoted_text)
        return " ".join(["'{}'".format(sent) for sent in sentences])
    return re.sub(quote_checker, fn, text)
number_checker = "([+-]?\d[\d,]*)[\.]?\d*"
count_checker = "(시|명|가지|살|마리|포기|송이|수|톨|통|점|개|벌|척|채|다발|그루|자루|줄|켤레|그릇|잔|마디|상자|사람|곡|병|판)"
def normalize_number(text):
    """Convert measurement units and digit strings in *text* into Korean words."""
    # Expand unit suffixes first (%, cm, mm, ...), then numbers followed by a
    # counter word (read with native Korean numerals), then bare numbers.
    text = normalize_with_dictionary(text, unit_to_kor1)
    text = normalize_with_dictionary(text, unit_to_kor2)
    text = re.sub(
        number_checker + count_checker,
        lambda match: number_to_korean(match, True),
        text,
    )
    return re.sub(number_checker, lambda match: number_to_korean(match, False), text)
num_to_kor1 = [""] + list("일이삼사오육칠팔구")
num_to_kor2 = [""] + list("만억조경해")
num_to_kor3 = [""] + list("십백천")
# count_to_kor1 = [""] + ["하나","둘","셋","넷","다섯","여섯","일곱","여덟","아홉"]
count_to_kor1 = [""] + ["한", "두", "세", "네", "다섯", "여섯", "일곱", "여덟", "아홉"]
count_tenth_dict = {
"십": "열",
"두십": "스물",
"세십": "서른",
"네십": "마흔",
"다섯십": "쉰",
"여섯십": "예순",
"일곱십": "일흔",
"여덟십": "여든",
"아홉십": "아흔",
}
def number_to_korean(num_str, is_count=False):
    """re.sub callback turning a matched digit string into Korean words.

    Args:
        num_str: regex match object. When *is_count* is True, group(1) is the
            number and group(2) the counter word; otherwise group() is the
            bare number (optionally signed, with commas and/or a fraction).
        is_count: when True, read with native Korean numerals (한, 두, 세, ...)
            suitable before counter words.

    Returns:
        str: the Korean reading followed by the counter word (if any).
    """
    if is_count:
        num_str, unit_str = num_str.group(1), num_str.group(2)
    else:
        num_str, unit_str = num_str.group(), ""
    num_str = num_str.replace(",", "")
    num = ast.literal_eval(num_str)
    if num == 0:
        return "영"
    # Split the integer part from an optional fractional part.
    check_float = num_str.split(".")
    if len(check_float) == 2:
        digit_str, float_str = check_float
    elif len(check_float) >= 3:
        raise Exception(" [!] Wrong number format")
    else:
        digit_str, float_str = check_float[0], None
    if is_count and float_str is not None:
        raise Exception(" [!] `is_count` and float number does not fit each other")
    digit = int(digit_str)
    if digit_str.startswith("-"):
        digit, digit_str = abs(digit), str(abs(digit))
    kor = ""
    size = len(str(digit))
    tmp = []
    # Read digits left to right; `tmp` collects one 4-digit group, which is
    # flushed with its scale word (만/억/조/... from num_to_kor2) at each
    # group boundary. num_to_kor3 supplies the in-group 십/백/천 markers.
    for i, v in enumerate(digit_str, start=1):
        v = int(v)
        if v != 0:
            if is_count:
                tmp += count_to_kor1[v]
            else:
                tmp += num_to_kor1[v]
            tmp += num_to_kor3[(size - i) % 4]
        if (size - i) % 4 == 0 and len(tmp) != 0:
            kor += "".join(tmp)
            tmp = []
            kor += num_to_kor2[int((size - i) / 4)]
    if is_count:
        # Drop a redundant leading '한' ("one") before a scale word.
        if kor.startswith("한") and len(kor) > 1:
            kor = kor[1:]
        # Native readings for multiples of ten: 두십 -> 스물, 세십 -> 서른, ...
        if any(word in kor for word in count_tenth_dict):
            kor = re.sub(
                "|".join(count_tenth_dict.keys()),
                lambda x: count_tenth_dict[x.group()],
                kor,
            )
    # Sino-Korean reading drops a redundant leading '일' ("one") as well.
    if not is_count and kor.startswith("일") and len(kor) > 1:
        kor = kor[1:]
    if float_str is not None:
        # '쩜' = "point"; fractional digits are read one by one.
        kor += "쩜 "
        kor += re.sub("\d", lambda x: num_to_kor[x.group()], float_str)
    if num_str.startswith("+"):
        kor = "플러스 " + kor
    elif num_str.startswith("-"):
        kor = "마이너스 " + kor
    return kor + unit_str
| 12,706 | 22.975472 | 89 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/cleaners.py | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from tensorflow_tts.utils.korean import tokenize as ko_tokenize
from tensorflow_tts.utils.number_norm import normalize_numbers
from unidecode import unidecode
try:
from german_transliterate.core import GermanTransliterate
except:
pass
# Regular expression matching whitespace:
_whitespace_re = re.compile(r"\s+")
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [
(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
for x in [
("mrs", "misess"),
("mr", "mister"),
("dr", "doctor"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("maj", "major"),
("gen", "general"),
("drs", "doctors"),
("rev", "reverend"),
("lt", "lieutenant"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("esq", "esquire"),
("ltd", "limited"),
("col", "colonel"),
("ft", "fort"),
]
]
def expand_abbreviations(text):
    """Replace abbreviations such as 'mr.' or 'dr.' with their full words."""
    for pattern, expansion in _abbreviations:
        text = pattern.sub(expansion, text)
    return text
def expand_numbers(text):
    """Spell out numeric expressions (digits, currency, ordinals) as words."""
    return normalize_numbers(text)
def lowercase(text):
    """Lowercase the whole string."""
    return text.lower()
def collapse_whitespace(text):
    """Squeeze every run of whitespace down to a single space."""
    # Same pattern as the module-level _whitespace_re (r"\s+").
    return re.sub(r"\s+", " ", text)
def convert_to_ascii(text):
    """Transliterate Unicode text into its closest ASCII equivalent."""
    return unidecode(text)
def basic_cleaners(text):
    """Basic pipeline that lowercases and collapses whitespace without transliteration."""
    return collapse_whitespace(lowercase(text))
def transliteration_cleaners(text):
    """Pipeline for non-English text that transliterates to ASCII."""
    return collapse_whitespace(lowercase(convert_to_ascii(text)))
def english_cleaners(text):
    """Pipeline for English text, including number and abbreviation expansion."""
    for step in (
        convert_to_ascii,
        lowercase,
        expand_numbers,
        expand_abbreviations,
        collapse_whitespace,
    ):
        text = step(text)
    return text
def korean_cleaners(text):
    """Pipeline for Korean text, including number and abbreviation expansion."""
    # NOTE: unlike the other cleaners this returns a list of jamo tokens,
    # not a string (ko_tokenize normalizes and decomposes in one step).
    text = ko_tokenize(
        text
    )  # '존경하는' --> ['ᄌ', 'ᅩ', 'ᆫ', 'ᄀ', 'ᅧ', 'ᆼ', 'ᄒ', 'ᅡ', 'ᄂ', 'ᅳ', 'ᆫ']
    return text
def german_cleaners(text):
"""Pipeline for German text, including number and abbreviation expansion."""
try:
text = GermanTransliterate(replace={';': ',', ':': ' '}, sep_abbreviation=' -- ').transliterate(text)
except NameError:
raise ModuleNotFoundError("Install german_transliterate package to use german_cleaners")
return text | 3,815 | 30.278689 | 109 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/utils.py | # -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Utility functions."""
import fnmatch
import os
import re
import tempfile
from pathlib import Path
import tensorflow as tf
MODEL_FILE_NAME = "model.h5"
CONFIG_FILE_NAME = "config.yml"
PROCESSOR_FILE_NAME = "processor.json"
LIBRARY_NAME = "tensorflow_tts"
CACHE_DIRECTORY = os.path.join(Path.home(), ".cache", LIBRARY_NAME)
def find_files(root_dir, query="*.wav", include_root_dir=True):
    """Find files recursively.

    Args:
        root_dir (str): Root directory to search (symlinks are followed).
        query (str): Glob pattern matched against file names.
        include_root_dir (bool): If False, the ``root_dir`` prefix is stripped
            from the returned paths.

    Returns:
        list: Paths of the matching files.
    """
    found = []
    for dirpath, _, names in os.walk(root_dir, followlinks=True):
        found.extend(
            os.path.join(dirpath, name) for name in fnmatch.filter(names, query)
        )
    if include_root_dir:
        return found
    return [path.replace(root_dir + "/", "") for path in found]
def _path_requires_gfile(filepath):
"""Checks if the given path requires use of GFile API.
Args:
filepath (str): Path to check.
Returns:
bool: True if the given path needs GFile API to access, such as
"s3://some/path" and "gs://some/path".
"""
# If the filepath contains a protocol (e.g. "gs://"), it should be handled
# using TensorFlow GFile API.
return bool(re.match(r"^[a-z]+://", filepath))
def save_weights(model, filepath):
    """Save model weights.
    Same as model.save_weights(filepath), but supports saving to S3 or GCS
    buckets using TensorFlow GFile API.
    Args:
        model (tf.keras.Model): Model to save.
        filepath (str): Path to save the model weights to.
    """
    if not _path_requires_gfile(filepath):
        model.save_weights(filepath)
        return
    # Save to a local temp file and copy to the desired path using GFile API.
    # The temp file keeps the target's extension — presumably so Keras infers
    # the serialization format from the suffix; confirm before changing.
    _, ext = os.path.splitext(filepath)
    with tempfile.NamedTemporaryFile(suffix=ext) as temp_file:
        model.save_weights(temp_file.name)
        # To preserve the original semantics, we need to overwrite the target
        # file.
        tf.io.gfile.copy(temp_file.name, filepath, overwrite=True)
def load_weights(model, filepath):
    """Load model weights.
    Same as model.load_weights(filepath), but supports loading from S3 or GCS
    buckets using TensorFlow GFile API.
    Args:
        model (tf.keras.Model): Model to load weights to.
        filepath (str): Path to the weights file.
    """
    if not _path_requires_gfile(filepath):
        model.load_weights(filepath)
        return
    # Make a local copy and load it.
    # The temp file keeps the remote file's extension — presumably so Keras
    # infers the format from the suffix; confirm before changing.
    _, ext = os.path.splitext(filepath)
    with tempfile.NamedTemporaryFile(suffix=ext) as temp_file:
        # The target temp_file should be created above, so we need to overwrite.
        tf.io.gfile.copy(filepath, temp_file.name, overwrite=True)
        model.load_weights(temp_file.name)
| 3,053 | 30.163265 | 80 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/strategy.py | # -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Strategy util functions"""
import tensorflow as tf
def return_strategy():
    """Pick a tf.distribute strategy based on the number of visible GPUs."""
    physical_devices = tf.config.list_physical_devices("GPU")
    if len(physical_devices) == 0:
        # No GPU available: run everything on the CPU.
        return tf.distribute.OneDeviceStrategy(device="/cpu:0")
    elif len(physical_devices) == 1:
        return tf.distribute.OneDeviceStrategy(device="/gpu:0")
    else:
        # Multiple GPUs: synchronous data-parallel training.
        return tf.distribute.MirroredStrategy()
def calculate_3d_loss(y_gt, y_pred, loss_fn):
    """Calculate 3d loss, normally it's mel-spectrogram loss.

    Both rank-3 tensors are trimmed along axis 1 (time) to the shorter length
    before the loss is computed, then the loss is averaged over all non-batch
    dimensions, yielding a per-sample loss of shape [B]. `loss_fn` may return
    a single element-wise loss or a tuple of them; a tuple yields a list of
    [B]-shaped losses.
    """
    y_gt_T = tf.shape(y_gt)[1]
    y_pred_T = tf.shape(y_pred)[1]
    # There is a mismatched length when training on multiple GPUs.
    # We need to slice the longer tensor to make sure the loss is
    # calculated correctly.
    if y_gt_T > y_pred_T:
        y_gt = tf.slice(y_gt, [0, 0, 0], [-1, y_pred_T, -1])
    elif y_pred_T > y_gt_T:
        y_pred = tf.slice(y_pred, [0, 0, 0], [-1, y_gt_T, -1])
    loss = loss_fn(y_gt, y_pred)
    if isinstance(loss, tuple) is False:
        loss = tf.reduce_mean(loss, list(range(1, len(loss.shape))))  # shape = [B]
    else:
        loss = list(loss)
        for i in range(len(loss)):
            loss[i] = tf.reduce_mean(
                loss[i], list(range(1, len(loss[i].shape)))
            )  # shape = [B]
    return loss
def calculate_2d_loss(y_gt, y_pred, loss_fn):
    """Calculate 2d loss, normally it's durrations/f0s/energys loss.

    Rank-2 counterpart of `calculate_3d_loss`: both tensors are trimmed along
    axis 1 to the shorter length, then the loss is averaged over all
    non-batch dimensions, yielding shape [B] (or a list of [B] losses when
    `loss_fn` returns a tuple).
    """
    y_gt_T = tf.shape(y_gt)[1]
    y_pred_T = tf.shape(y_pred)[1]
    # There is a mismatched length when training on multiple GPUs.
    # We need to slice the longer tensor to make sure the loss is
    # calculated correctly.
    if y_gt_T > y_pred_T:
        y_gt = tf.slice(y_gt, [0, 0], [-1, y_pred_T])
    elif y_pred_T > y_gt_T:
        y_pred = tf.slice(y_pred, [0, 0], [-1, y_gt_T])
    loss = loss_fn(y_gt, y_pred)
    if isinstance(loss, tuple) is False:
        loss = tf.reduce_mean(loss, list(range(1, len(loss.shape))))  # shape = [B]
    else:
        loss = list(loss)
        for i in range(len(loss)):
            loss[i] = tf.reduce_mean(
                loss[i], list(range(1, len(loss[i].shape)))
            )  # shape = [B]
    return loss
| 2,766 | 34.474359 | 83 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/decoder.py | # -*- coding: utf-8 -*-
# Copyright 2020 TensorFlow Authors, All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Tuple, Union
import tensorflow as tf
from tensorflow.python.ops import control_flow_util
from tensorflow_addons.seq2seq import Decoder
from tensorflow_addons.seq2seq.decoder import (
BaseDecoder,
_prepend_batch,
_transpose_batch_time,
)
from tensorflow_addons.utils.types import Number, TensorLike
def dynamic_decode(
    decoder: Union[Decoder, BaseDecoder],
    output_time_major: bool = False,
    impute_finished: bool = False,
    maximum_iterations: Optional[TensorLike] = None,
    parallel_iterations: int = 32,
    swap_memory: bool = False,
    training: Optional[bool] = None,
    scope: Optional[str] = None,
    enable_tflite_convertible: bool = False,
    **kwargs
) -> Tuple[Any, Any, Any]:
    """Perform dynamic decoding with `decoder`.
    Calls initialize() once and step() repeatedly on the Decoder object.
    Args:
      decoder: A `Decoder` instance.
      output_time_major: Python boolean.  Default: `False` (batch major). If
        `True`, outputs are returned as time major tensors (this mode is
        faster). Otherwise, outputs are returned as batch major tensors (this
        adds extra time to the computation).
      impute_finished: Python boolean.  If `True`, then states for batch
        entries which are marked as finished get copied through and the
        corresponding outputs get zeroed out.  This causes some slowdown at
        each time step, but ensures that the final state and outputs have
        the correct values and that backprop ignores time steps that were
        marked as finished.
      maximum_iterations: A strictly positive `int32` scalar, the maximum
        allowed number of decoding steps. Default is `None` (decode until the
        decoder is fully done).
      parallel_iterations: Argument passed to `tf.while_loop`.
      swap_memory: Argument passed to `tf.while_loop`.
      training: Python boolean. Indicates whether the layer should behave
        in training mode or in inference mode. Only relevant
        when `dropout` or `recurrent_dropout` is used.
      scope: Optional name scope to use.
      enable_tflite_convertible: Python boolean. If `True`, then the variables
        of `TensorArray` become of 1-D static shape. Also zero pads in the
        output tensor will be discarded. Default: `False`.
      **kwargs: dict, other keyword arguments for dynamic_decode. It might
        contain arguments for `BaseDecoder` to initialize, which takes all
        tensor inputs during call().
    Returns:
      `(final_outputs, final_state, final_sequence_lengths)`.
    Raises:
      ValueError: if `maximum_iterations` is provided but is not a scalar.
    """
    with tf.name_scope(scope or "decoder"):
        # XLA-compiled graphs require a statically bounded loop, hence the
        # maximum_iterations requirement enforced below.
        is_xla = not tf.executing_eagerly() and control_flow_util.GraphOrParentsInXlaContext(
            tf.compat.v1.get_default_graph()
        )
        if maximum_iterations is not None:
            maximum_iterations = tf.convert_to_tensor(
                maximum_iterations, dtype=tf.int32, name="maximum_iterations"
            )
            if maximum_iterations.shape.ndims != 0:
                raise ValueError("maximum_iterations must be a scalar")
            tf.debugging.assert_greater(
                maximum_iterations,
                0,
                message="maximum_iterations should be greater than 0",
            )
        elif is_xla:
            raise ValueError("maximum_iterations is required for XLA compilation.")
        if isinstance(decoder, Decoder):
            initial_finished, initial_inputs, initial_state = decoder.initialize()
        else:
            # For BaseDecoder that takes tensor inputs during call.
            decoder_init_input = kwargs.pop("decoder_init_input", None)
            decoder_init_kwargs = kwargs.pop("decoder_init_kwargs", {})
            initial_finished, initial_inputs, initial_state = decoder.initialize(
                decoder_init_input, **decoder_init_kwargs
            )
        # Template of all-zero outputs, used to pad batch entries that have
        # already finished when impute_finished is enabled.
        if enable_tflite_convertible:
            # Assume the batch_size = 1 for inference.
            # So we can change 2-D TensorArray into 1-D by reshaping it.
            zero_outputs = tf.nest.map_structure(
                lambda shape, dtype: tf.reshape(
                    tf.zeros(_prepend_batch(decoder.batch_size, shape), dtype=dtype),
                    [-1],
                ),
                decoder.output_size,
                decoder.output_dtype,
            )
        else:
            zero_outputs = tf.nest.map_structure(
                lambda shape, dtype: tf.zeros(
                    _prepend_batch(decoder.batch_size, shape), dtype=dtype
                ),
                decoder.output_size,
                decoder.output_dtype,
            )
        if maximum_iterations is not None:
            # With maximum_iterations == 0 every entry starts out finished.
            initial_finished = tf.logical_or(initial_finished, 0 >= maximum_iterations)
        initial_sequence_lengths = tf.zeros_like(initial_finished, dtype=tf.int32)
        initial_time = tf.constant(0, dtype=tf.int32)
        def _shape(batch_size, from_shape):
            """Static per-step element shape for the output TensorArrays."""
            if not isinstance(from_shape, tf.TensorShape) or from_shape.ndims == 0:
                return None
            else:
                batch_size = tf.get_static_value(
                    tf.convert_to_tensor(batch_size, name="batch_size")
                )
                if enable_tflite_convertible:
                    # Since we can't use 2-D TensoArray and assume `batch_size` = 1,
                    # we use `from_shape` dimension only.
                    return from_shape
                return tf.TensorShape([batch_size]).concatenate(from_shape)
        dynamic_size = maximum_iterations is None or not is_xla
        # The dynamic shape `TensoArray` is not allowed in TFLite yet.
        dynamic_size = dynamic_size and (not enable_tflite_convertible)
        def _create_ta(s, d):
            """Build one output TensorArray with the proper element shape."""
            return tf.TensorArray(
                dtype=d,
                size=0 if dynamic_size else maximum_iterations,
                dynamic_size=dynamic_size,
                element_shape=_shape(decoder.batch_size, s),
            )
        initial_outputs_ta = tf.nest.map_structure(
            _create_ta, decoder.output_size, decoder.output_dtype
        )
        def condition(
            unused_time,
            unused_outputs_ta,
            unused_state,
            unused_inputs,
            finished,
            unused_sequence_lengths,
        ):
            """Keep looping while at least one batch entry is unfinished."""
            return tf.logical_not(tf.reduce_all(finished))
        def body(time, outputs_ta, state, inputs, finished, sequence_lengths):
            """Internal while_loop body.
            Args:
              time: scalar int32 tensor.
              outputs_ta: structure of TensorArray.
              state: (structure of) state tensors and TensorArrays.
              inputs: (structure of) input tensors.
              finished: bool tensor (keeping track of what's finished).
              sequence_lengths: int32 tensor (keeping track of time of finish).
            Returns:
              `(time + 1, outputs_ta, next_state, next_inputs, next_finished,
                next_sequence_lengths)`.
            """
            (next_outputs, decoder_state, next_inputs, decoder_finished) = decoder.step(
                time, inputs, state, training
            )
            decoder_state_sequence_lengths = False
            if decoder.tracks_own_finished:
                next_finished = decoder_finished
                lengths = getattr(decoder_state, "lengths", None)
                if lengths is not None:
                    # sequence lengths are provided by decoder_state.lengths;
                    # overwrite our sequence lengths.
                    decoder_state_sequence_lengths = True
                    sequence_lengths = tf.cast(lengths, tf.int32)
            else:
                next_finished = tf.logical_or(decoder_finished, finished)
            if decoder_state_sequence_lengths:
                # Just pass something through the loop; at the next iteration
                # we'll pull the sequence lengths from the decoder_state again.
                next_sequence_lengths = sequence_lengths
            else:
                # Record the finishing time of entries that finish this step.
                next_sequence_lengths = tf.where(
                    tf.logical_not(finished),
                    tf.fill(tf.shape(sequence_lengths), time + 1),
                    sequence_lengths,
                )
            tf.nest.assert_same_structure(state, decoder_state)
            tf.nest.assert_same_structure(outputs_ta, next_outputs)
            tf.nest.assert_same_structure(inputs, next_inputs)
            # Zero out output values past finish
            if impute_finished:
                def zero_out_finished(out, zero):
                    if finished.shape.rank < zero.shape.rank:
                        broadcast_finished = tf.broadcast_to(
                            tf.expand_dims(finished, axis=-1), zero.shape
                        )
                        return tf.where(broadcast_finished, zero, out)
                    else:
                        return tf.where(finished, zero, out)
                emit = tf.nest.map_structure(
                    zero_out_finished, next_outputs, zero_outputs
                )
            else:
                emit = next_outputs
            # Copy through states past finish
            def _maybe_copy_state(new, cur):
                # TensorArrays and scalar states get passed through.
                if isinstance(cur, tf.TensorArray):
                    pass_through = True
                else:
                    new.set_shape(cur.shape)
                    pass_through = new.shape.ndims == 0
                if not pass_through:
                    broadcast_finished = tf.broadcast_to(
                        tf.expand_dims(finished, axis=-1), new.shape
                    )
                    return tf.where(broadcast_finished, cur, new)
                else:
                    return new
            if impute_finished:
                next_state = tf.nest.map_structure(
                    _maybe_copy_state, decoder_state, state
                )
            else:
                next_state = decoder_state
            if enable_tflite_convertible:
                # Reshape to 1-D.
                emit = tf.nest.map_structure(lambda x: tf.reshape(x, [-1]), emit)
            outputs_ta = tf.nest.map_structure(
                lambda ta, out: ta.write(time, out), outputs_ta, emit
            )
            return (
                time + 1,
                outputs_ta,
                next_state,
                next_inputs,
                next_finished,
                next_sequence_lengths,
            )
        # Run the decode loop until every batch entry is finished (or
        # maximum_iterations is reached).
        res = tf.while_loop(
            condition,
            body,
            loop_vars=(
                initial_time,
                initial_outputs_ta,
                initial_state,
                initial_inputs,
                initial_finished,
                initial_sequence_lengths,
            ),
            parallel_iterations=parallel_iterations,
            maximum_iterations=maximum_iterations,
            swap_memory=swap_memory,
        )
        final_outputs_ta = res[1]
        final_state = res[2]
        final_sequence_lengths = res[5]
        # Stack TensorArrays into dense [T, B, ...] tensors.
        final_outputs = tf.nest.map_structure(lambda ta: ta.stack(), final_outputs_ta)
        try:
            final_outputs, final_state = decoder.finalize(
                final_outputs, final_state, final_sequence_lengths
            )
        except NotImplementedError:
            pass
        if not output_time_major:
            if enable_tflite_convertible:
                # Reshape the output to the original shape.
                def _restore_batch(x):
                    return tf.expand_dims(x, [1])
                final_outputs = tf.nest.map_structure(_restore_batch, final_outputs)
            final_outputs = tf.nest.map_structure(_transpose_batch_time, final_outputs)
    return final_outputs, final_state, final_sequence_lengths
# ---- tensorflow_tts/utils/outliers.py ----
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Outliers detection and remove."""
import numpy as np
def is_outlier(x, p25, p75):
    """Return True when ``x`` lies on or outside the 1.5*IQR Tukey fences."""
    iqr = p75 - p25
    fence_low = p25 - 1.5 * iqr
    fence_high = p75 + 1.5 * iqr
    return not (fence_low < x < fence_high)
def remove_outlier(x, p_bottom: int = 25, p_top: int = 75):
    """Replace outliers in ``x`` (in place) by the maximum non-outlier value.

    Outliers are detected with Tukey's 1.5*IQR rule based on the
    ``p_bottom``/``p_top`` percentiles (matching ``is_outlier``: values on
    the fences also count). The outlier positions are zeroed first so that
    ``np.max`` below is taken over the array with the outliers removed, and
    the outliers are then filled with that maximum.

    Args:
        x (np.ndarray): 1-D array (e.g. an f0 contour); modified in place.
        p_bottom (int): lower percentile defining the fences. Default 25.
        p_top (int): upper percentile defining the fences. Default 75.

    Returns:
        np.ndarray: ``x`` with outliers replaced.
    """
    x = np.asarray(x)
    q_low = np.percentile(x, p_bottom)
    q_high = np.percentile(x, p_top)
    iqr = q_high - q_low
    # Vectorized Tukey-fence test (same <=/>= semantics as is_outlier).
    outliers = (x <= q_low - 1.5 * iqr) | (x >= q_high + 1.5 * iqr)
    # Zero the outliers first so they cannot win the max() below,
    # then replace them by the maximum of the remaining values.
    x[outliers] = 0.0
    x[outliers] = np.max(x)
    return x
# ---- tensorflow_tts/utils/group_conv.py ----
# -*- coding: utf-8 -*-
# This code is copy from https://github.com/tensorflow/tensorflow/pull/36773.
"""Group Convolution Modules."""
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations, constraints, initializers, regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
from tensorflow.python.keras.layers import Conv1D, SeparableConv1D
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.ops import array_ops, nn, nn_ops
class Convolution(object):
    """Helper class for convolution.
    Note that this class assumes that shapes of input and filter passed to
    __call__ are compatible with input_shape and filter_shape passed to the
    constructor.
    Arguments
      input_shape: static shape of input. i.e. input.get_shape().
      filter_shape: static shape of the filter. i.e. filter.get_shape().
      padding: see convolution.
      strides: see convolution.
      dilation_rate: see convolution.
      name: see convolution.
      data_format: see convolution.
    """
    def __init__(
        self,
        input_shape,
        filter_shape,
        padding,
        strides=None,
        dilation_rate=None,
        name=None,
        data_format=None,
    ):
        """Validate shapes and build the underlying convolution op."""
        # Infer total rank from the filter if known, else from the input.
        num_total_dims = filter_shape.ndims
        if num_total_dims is None:
            num_total_dims = input_shape.ndims
        if num_total_dims is None:
            raise ValueError("rank of input or filter must be known")
        num_spatial_dims = num_total_dims - 2
        try:
            input_shape.with_rank(num_spatial_dims + 2)
        except ValueError:
            raise ValueError("input tensor must have rank %d" % (num_spatial_dims + 2))
        try:
            filter_shape.with_rank(num_spatial_dims + 2)
        except ValueError:
            raise ValueError("filter tensor must have rank %d" % (num_spatial_dims + 2))
        # Locate the channel dimension: channels-last (default) vs "NC*"
        # channels-first layouts place it at opposite ends.
        if data_format is None or not data_format.startswith("NC"):
            input_channels_dim = tensor_shape.dimension_at_index(
                input_shape, num_spatial_dims + 1
            )
            spatial_dims = range(1, num_spatial_dims + 1)
        else:
            input_channels_dim = tensor_shape.dimension_at_index(input_shape, 1)
            spatial_dims = range(2, num_spatial_dims + 2)
        # Grouped convolution: input channels must be a multiple of the
        # filter's input-channel dimension.
        filter_dim = tensor_shape.dimension_at_index(filter_shape, num_spatial_dims)
        if not (input_channels_dim % filter_dim).is_compatible_with(0):
            raise ValueError(
                "number of input channels is not divisible by corresponding "
                "dimension of filter, {} % {} != 0".format(
                    input_channels_dim, filter_dim
                )
            )
        # NOTE(review): uses private TensorFlow internals (nn_ops._*); these
        # may change between TF versions.
        strides, dilation_rate = nn_ops._get_strides_and_dilation_rate(
            num_spatial_dims, strides, dilation_rate
        )
        self.input_shape = input_shape
        self.filter_shape = filter_shape
        self.data_format = data_format
        self.strides = strides
        self.padding = padding
        self.name = name
        self.dilation_rate = dilation_rate
        # Dilation is implemented via space-to-batch around a non-atrous conv.
        self.conv_op = nn_ops._WithSpaceToBatch(
            input_shape,
            dilation_rate=dilation_rate,
            padding=padding,
            build_op=self._build_op,
            filter_shape=filter_shape,
            spatial_dims=spatial_dims,
            data_format=data_format,
        )
    def _build_op(self, _, padding):
        """Build the inner (non-dilated) convolution op for _WithSpaceToBatch."""
        return nn_ops._NonAtrousConvolution(
            self.input_shape,
            filter_shape=self.filter_shape,
            padding=padding,
            data_format=self.data_format,
            strides=self.strides,
            name=self.name,
        )
    def __call__(self, inp, filter):
        """Apply the convolution to `inp` with the given `filter` tensor."""
        return self.conv_op(inp, filter)
class Conv(Layer):
    """Abstract N-D convolution layer (private, used as implementation base).
    This layer creates a convolution kernel that is convolved
    (actually cross-correlated) with the layer input to produce a tensor of
    outputs. If `use_bias` is True (and a `bias_initializer` is provided),
    a bias vector is created and added to the outputs. Finally, if
    `activation` is not `None`, it is applied to the outputs as well.
    Note: layer attributes cannot be modified after the layer has been called
    once (except the `trainable` attribute).
    Arguments:
      rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
      filters: Integer, the dimensionality of the output space (i.e. the number
        of filters in the convolution).
      kernel_size: An integer or tuple/list of n integers, specifying the
        length of the convolution window.
      strides: An integer or tuple/list of n integers,
        specifying the stride length of the convolution.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"`,  `"same"`, or `"causal"` (case-insensitive).
      data_format: A string, one of `channels_last` (default) or `channels_first`.
        The ordering of the dimensions in the inputs.
        `channels_last` corresponds to inputs with shape
        `(batch_size, ..., channels)` while `channels_first` corresponds to
        inputs with shape `(batch_size, channels, ...)`.
      dilation_rate: An integer or tuple/list of n integers, specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any `strides` value != 1.
      groups: Integer, the number of channel groups controlling the connections
        between inputs and outputs. Input channels and `filters` must both be
        divisible by `groups`. For example,
            - At `groups=1`, all inputs are convolved to all outputs.
            - At `groups=2`, the operation becomes equivalent to having two
                convolutional layers side by side, each seeing half the input
                channels, and producing half the output channels, and both
                subsequently concatenated.
            - At `groups=input_channels`, each input channel is convolved with its
                own set of filters, of size `input_channels / filters`
      activation: Activation function to use.
        If you don't specify anything, no activation is applied.
      use_bias: Boolean, whether the layer uses a bias.
      kernel_initializer: An initializer for the convolution kernel.
      bias_initializer: An initializer for the bias vector. If None, the default
        initializer will be used.
      kernel_regularizer: Optional regularizer for the convolution kernel.
      bias_regularizer: Optional regularizer for the bias vector.
      activity_regularizer: Optional regularizer function for the output.
      kernel_constraint: Optional projection function to be applied to the
        kernel after being updated by an `Optimizer` (e.g. used to implement
        norm constraints or value constraints for layer weights). The function
        must take as input the unprojected variable and must return the
        projected variable (which must have the same shape). Constraints are
        not safe to use when doing asynchronous distributed training.
      bias_constraint: Optional projection function to be applied to the
        bias after being updated by an `Optimizer`.
      trainable: Boolean, if `True` the weights of this layer will be marked as
        trainable (and listed in `layer.trainable_weights`).
      name: A string, the name of the layer.
    """
    def __init__(
        self,
        rank,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format=None,
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        trainable=True,
        name=None,
        **kwargs
    ):
        super(Conv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs
        )
        self.rank = rank
        if filters is not None and not isinstance(filters, int):
            filters = int(filters)
        self.filters = filters
        # `groups` partitions the channels into independent convolutions
        # (grouped convolution); `filters` must divide evenly.
        self.groups = groups or 1
        if filters is not None and filters % self.groups != 0:
            raise ValueError(
                "The number of filters must be evenly divisible by the number of "
                "groups. Received: groups={}, filters={}".format(groups, filters)
            )
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, "kernel_size")
        if not all(self.kernel_size):
            raise ValueError(
                "The argument `kernel_size` cannot contain 0(s). "
                "Received: %s" % (kernel_size,)
            )
        self.strides = conv_utils.normalize_tuple(strides, rank, "strides")
        self.padding = conv_utils.normalize_padding(padding)
        if self.padding == "causal" and not isinstance(self, (Conv1D, SeparableConv1D)):
            raise ValueError(
                "Causal padding is only supported for `Conv1D`"
                "and ``SeparableConv1D`."
            )
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, "dilation_rate"
        )
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)
    def build(self, input_shape):
        """Create the kernel/bias variables and the cached convolution op."""
        input_shape = tensor_shape.TensorShape(input_shape)
        input_channel = self._get_input_channel(input_shape)
        if input_channel % self.groups != 0:
            raise ValueError(
                "The number of input channels must be evenly divisible by the number "
                "of groups. Received groups={}, but the input has {} channels "
                "(full input shape is {}).".format(
                    self.groups, input_channel, input_shape
                )
            )
        # Grouped conv: each group only sees input_channel // groups channels.
        kernel_shape = self.kernel_size + (input_channel // self.groups, self.filters)
        self.kernel = self.add_weight(
            name="kernel",
            shape=kernel_shape,
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            trainable=True,
            dtype=self.dtype,
        )
        if self.use_bias:
            self.bias = self.add_weight(
                name="bias",
                shape=(self.filters,),
                initializer=self.bias_initializer,
                regularizer=self.bias_regularizer,
                constraint=self.bias_constraint,
                trainable=True,
                dtype=self.dtype,
            )
        else:
            self.bias = None
        channel_axis = self._get_channel_axis()
        self.input_spec = InputSpec(
            ndim=self.rank + 2, axes={channel_axis: input_channel}
        )
        # Cache the shape the conv op was built for, so call() can detect a
        # shape change and rebuild (see _recreate_conv_op).
        self._build_conv_op_input_shape = input_shape
        self._build_input_channel = input_channel
        self._padding_op = self._get_padding_op()
        self._conv_op_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2
        )
        self._convolution_op = Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=self._padding_op,
            data_format=self._conv_op_data_format,
        )
        self.built = True
    def call(self, inputs):
        # Rebuild the cached conv op if the runtime input shape differs from
        # the shape seen at build() time.
        if self._recreate_conv_op(inputs):
            self._convolution_op = Convolution(
                inputs.get_shape(),
                filter_shape=self.kernel.shape,
                dilation_rate=self.dilation_rate,
                strides=self.strides,
                padding=self._padding_op,
                data_format=self._conv_op_data_format,
            )
            self._build_conv_op_input_shape = inputs.get_shape()
        # Apply causal padding to inputs for Conv1D.
        if self.padding == "causal" and self.__class__.__name__ == "Conv1D":
            inputs = array_ops.pad(inputs, self._compute_causal_padding())
        outputs = self._convolution_op(inputs, self.kernel)
        if self.use_bias:
            if self.data_format == "channels_first":
                if self.rank == 1:
                    # nn.bias_add does not accept a 1D input tensor.
                    bias = array_ops.reshape(self.bias, (1, self.filters, 1))
                    outputs += bias
                else:
                    outputs = nn.bias_add(outputs, self.bias, data_format="NCHW")
            else:
                outputs = nn.bias_add(outputs, self.bias, data_format="NHWC")
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
    def compute_output_shape(self, input_shape):
        """Compute the output shape from the input shape and conv parameters."""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        if self.data_format == "channels_last":
            space = input_shape[1:-1]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i],
                )
                new_space.append(new_dim)
            return tensor_shape.TensorShape(
                [input_shape[0]] + new_space + [self.filters]
            )
        else:
            space = input_shape[2:]
            new_space = []
            for i in range(len(space)):
                new_dim = conv_utils.conv_output_length(
                    space[i],
                    self.kernel_size[i],
                    padding=self.padding,
                    stride=self.strides[i],
                    dilation=self.dilation_rate[i],
                )
                new_space.append(new_dim)
            return tensor_shape.TensorShape([input_shape[0], self.filters] + new_space)
    def get_config(self):
        """Return the layer configuration for serialization."""
        config = {
            "filters": self.filters,
            "kernel_size": self.kernel_size,
            "strides": self.strides,
            "padding": self.padding,
            "data_format": self.data_format,
            "dilation_rate": self.dilation_rate,
            "groups": self.groups,
            "activation": activations.serialize(self.activation),
            "use_bias": self.use_bias,
            "kernel_initializer": initializers.serialize(self.kernel_initializer),
            "bias_initializer": initializers.serialize(self.bias_initializer),
            "kernel_regularizer": regularizers.serialize(self.kernel_regularizer),
            "bias_regularizer": regularizers.serialize(self.bias_regularizer),
            "activity_regularizer": regularizers.serialize(self.activity_regularizer),
            "kernel_constraint": constraints.serialize(self.kernel_constraint),
            "bias_constraint": constraints.serialize(self.bias_constraint),
        }
        base_config = super(Conv, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
    def _compute_causal_padding(self):
        """Calculates padding for 'causal' option for 1-d conv layers."""
        left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
        if self.data_format == "channels_last":
            causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
        else:
            causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
        return causal_padding
    def _get_channel_axis(self):
        """Return the axis index holding channels for the current data format."""
        if self.data_format == "channels_first":
            return 1
        else:
            return -1
    def _get_input_channel(self, input_shape):
        """Return the (static) number of input channels; raise if unknown."""
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError(
                "The channel dimension of the inputs "
                "should be defined. Found `None`."
            )
        return int(input_shape[channel_axis])
    def _get_padding_op(self):
        """Map the user-facing padding mode to the op-level padding argument."""
        if self.padding == "causal":
            # Causal padding is applied manually in call(); the op sees "valid".
            op_padding = "valid"
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()
        return op_padding
    def _recreate_conv_op(self, inputs):
        """Recreate conv_op if necessary.
        Check if the input_shape in call() is different from that in build().
        For the values that are not None, if they are different, recreate
        the _convolution_op to avoid the stateful behavior.
        Args:
          inputs: The input data to call() method.
        Returns:
          `True` or `False` to indicate whether to recreate the conv_op.
        """
        call_input_shape = inputs.get_shape()
        for axis in range(1, len(call_input_shape)):
            if (
                call_input_shape[axis] is not None
                and self._build_conv_op_input_shape[axis] is not None
                and call_input_shape[axis] != self._build_conv_op_input_shape[axis]
            ):
                return True
        return False
class GroupConv1D(Conv):
    """1D convolution layer (e.g. temporal convolution).
    This layer creates a convolution kernel that is convolved
    with the layer input over a single spatial (or temporal) dimension
    to produce a tensor of outputs.
    If `use_bias` is True, a bias vector is created and added to the outputs.
    Finally, if `activation` is not `None`,
    it is applied to the outputs as well.
    When using this layer as the first layer in a model,
    provide an `input_shape` argument
    (tuple of integers or `None`, e.g.
    `(10, 128)` for sequences of 10 vectors of 128-dimensional vectors,
    or `(None, 128)` for variable-length sequences of 128-dimensional vectors.
    Examples:
    >>> # The inputs are 128-length vectors with 10 timesteps, and the batch size
    >>> # is 4.
    >>> input_shape = (4, 10, 128)
    >>> x = tf.random.normal(input_shape)
    >>> y = tf.keras.layers.Conv1D(
    ... 32, 3, activation='relu',input_shape=input_shape)(x)
    >>> print(y.shape)
    (4, 8, 32)
    Arguments:
      filters: Integer, the dimensionality of the output space
        (i.e. the number of output filters in the convolution).
      kernel_size: An integer or tuple/list of a single integer,
        specifying the length of the 1D convolution window.
      strides: An integer or tuple/list of a single integer,
        specifying the stride length of the convolution.
        Specifying any stride value != 1 is incompatible with specifying
        any `dilation_rate` value != 1.
      padding: One of `"valid"`, `"causal"` or `"same"` (case-insensitive).
        `"causal"` results in causal (dilated) convolutions, e.g. `output[t]`
        does not depend on `input[t+1:]`. Useful when modeling temporal data
        where the model should not violate the temporal order.
        See [WaveNet: A Generative Model for Raw Audio, section
          2.1](https://arxiv.org/abs/1609.03499).
      data_format: A string,
        one of `channels_last` (default) or `channels_first`.
      groups: Integer, the number of channel groups controlling the connections
        between inputs and outputs. Input channels and `filters` must both be
        divisible by `groups`. For example,
            - At `groups=1`, all inputs are convolved to all outputs.
            - At `groups=2`, the operation becomes equivalent to having two
                convolutional layers side by side, each seeing half the input
                channels, and producing half the output channels, and both
                subsequently concatenated.
            - At `groups=input_channels`, each input channel is convolved with its
                own set of filters, of size `input_channels / filters`
      dilation_rate: an integer or tuple/list of a single integer, specifying
        the dilation rate to use for dilated convolution.
        Currently, specifying any `dilation_rate` value != 1 is
        incompatible with specifying any `strides` value != 1.
      activation: Activation function to use.
        If you don't specify anything, no activation is applied (
        see `keras.activations`).
      use_bias: Boolean, whether the layer uses a bias vector.
      kernel_initializer: Initializer for the `kernel` weights matrix (
        see `keras.initializers`).
      bias_initializer: Initializer for the bias vector (
        see `keras.initializers`).
      kernel_regularizer: Regularizer function applied to
        the `kernel` weights matrix (see `keras.regularizers`).
      bias_regularizer: Regularizer function applied to the bias vector (
        see `keras.regularizers`).
      activity_regularizer: Regularizer function applied to
        the output of the layer (its "activation") (
        see `keras.regularizers`).
      kernel_constraint: Constraint function applied to the kernel matrix (
        see `keras.constraints`).
      bias_constraint: Constraint function applied to the bias vector (
        see `keras.constraints`).
    Input shape:
      3D tensor with shape: `(batch_size, steps, input_dim)`
    Output shape:
      3D tensor with shape: `(batch_size, new_steps, filters)`
        `steps` value might have changed due to padding or strides.
    Returns:
      A tensor of rank 3 representing
      `activation(conv1d(inputs, kernel) + bias)`.
    Raises:
      ValueError: when both `strides` > 1 and `dilation_rate` > 1.
    """
    def __init__(
        self,
        filters,
        kernel_size,
        strides=1,
        padding="valid",
        data_format="channels_last",
        dilation_rate=1,
        groups=1,
        activation=None,
        use_bias=True,
        kernel_initializer="glorot_uniform",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        activity_regularizer=None,
        kernel_constraint=None,
        bias_constraint=None,
        **kwargs
    ):
        # Thin wrapper: all behavior lives in the grouped `Conv` base class;
        # this just fixes rank=1 and resolves the string identifiers.
        super().__init__(
            rank=1,
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            groups=groups,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs
        )
# ---- tensorflow_tts/utils/__init__.py ----
from tensorflow_tts.utils.cleaners import (
basic_cleaners,
collapse_whitespace,
convert_to_ascii,
english_cleaners,
expand_abbreviations,
expand_numbers,
lowercase,
transliteration_cleaners,
)
from tensorflow_tts.utils.decoder import dynamic_decode
from tensorflow_tts.utils.griffin_lim import TFGriffinLim, griffin_lim_lb
from tensorflow_tts.utils.group_conv import GroupConv1D
from tensorflow_tts.utils.number_norm import normalize_numbers
from tensorflow_tts.utils.outliers import remove_outlier
from tensorflow_tts.utils.strategy import (
calculate_2d_loss,
calculate_3d_loss,
return_strategy,
)
from tensorflow_tts.utils.utils import find_files, MODEL_FILE_NAME, CONFIG_FILE_NAME, PROCESSOR_FILE_NAME, CACHE_DIRECTORY, LIBRARY_NAME
from tensorflow_tts.utils.weight_norm import WeightNormalization
# ---- tensorflow_tts/utils/griffin_lim.py ----
# -*- coding: utf-8 -*-
# Copyright 2020 Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Griffin-Lim phase reconstruction algorithm from mel spectrogram."""
import os
import librosa
import numpy as np
import soundfile as sf
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
def griffin_lim_lb(
    mel_spec, stats_path, dataset_config, n_iter=32, output_dir=None, wav_name="lb"
):
    """Reconstruct a waveform from a mel spectrogram via Librosa's Griffin-Lim.

    The normalized log-mel input is denormalized with the statistics stored
    at ``stats_path``, mapped back to a linear spectrogram through the
    pseudo-inverse of the mel filterbank, and passed to ``librosa.griffinlim``
    for phase reconstruction.

    Args:
        mel_spec (ndarray): array representing the mel spectrogram.
        stats_path (str): path to the `stats.npy` file containing norm statistics.
        dataset_config (Dict): dataset configuration parameters.
        n_iter (int): number of iterations for GL.
        output_dir (str): output directory where audio file will be saved.
        wav_name (str): name of the output file.

    Returns:
        ndarray: the reconstructed waveform.
    """
    denormalizer = StandardScaler()
    denormalizer.mean_, denormalizer.scale_ = np.load(stats_path)
    # Undo normalization and log10 compression; transpose to [num_mels, T].
    power_spec = np.power(10.0, denormalizer.inverse_transform(mel_spec)).T
    basis = librosa.filters.mel(
        dataset_config["sampling_rate"],
        n_fft=dataset_config["fft_size"],
        n_mels=dataset_config["num_mels"],
        fmin=dataset_config["fmin"],
        fmax=dataset_config["fmax"],
    )
    # Approximate linear-frequency magnitudes via the filterbank pseudo-inverse.
    linear_spec = np.maximum(1e-10, np.dot(np.linalg.pinv(basis), power_spec))
    waveform = librosa.griffinlim(
        linear_spec,
        n_iter=n_iter,
        hop_length=dataset_config["hop_size"],
        win_length=dataset_config["win_length"] or dataset_config["fft_size"],
    )
    if output_dir:
        sf.write(
            os.path.join(output_dir, f"{wav_name}.wav"),
            waveform,
            dataset_config["sampling_rate"],
            "PCM_16",
        )
    return waveform
class TFGriffinLim(tf.keras.layers.Layer):
    """Griffin-Lim algorithm for phase reconstruction from mel spectrogram magnitude."""

    def __init__(self, stats_path, dataset_config, normalized: bool = True):
        """Init GL params.
        Args:
            stats_path (str): path to the `stats.npy` file containing norm statistics.
            dataset_config (Dict): dataset configuration parameters.
            normalized (bool): if True, inputs to `call` are assumed to be
                standardized with the statistics stored at `stats_path`.
        """
        super().__init__()
        self.normalized = normalized
        if normalized:
            scaler = StandardScaler()
            scaler.mean_, scaler.scale_ = np.load(stats_path)
            self.scaler = scaler
        self.ds_config = dataset_config
        # Pass `sr` by keyword: positional `sr` is deprecated and removed in
        # librosa >= 0.10 (keyword form works with older versions as well).
        self.mel_basis = librosa.filters.mel(
            sr=self.ds_config["sampling_rate"],
            n_fft=self.ds_config["fft_size"],
            n_mels=self.ds_config["num_mels"],
            fmin=self.ds_config["fmin"],
            fmax=self.ds_config["fmax"],
        )  # [num_mels, fft_size // 2 + 1]

    def save_wav(self, gl_tf, output_dir, wav_name):
        """Generate WAV file and save it.
        Args:
            gl_tf (tf.Tensor): reconstructed signal from GL algorithm.
            output_dir (str): output directory where audio file will be saved.
            wav_name (str or List[str]): name(s) of the output file(s); for a
                batched input one name per batch element is required.
        """
        encode_fn = lambda x: tf.audio.encode_wav(x, self.ds_config["sampling_rate"])
        # encode_wav expects a trailing channel dimension.
        gl_tf = tf.expand_dims(gl_tf, -1)
        if not isinstance(wav_name, list):
            wav_name = [wav_name]
        if len(gl_tf.shape) > 2:
            # Batched input: encode each element and write one file per name.
            bs, *_ = gl_tf.shape
            assert bs == len(wav_name), "Batch and 'wav_name' have different size."
            tf_wav = tf.map_fn(encode_fn, gl_tf, dtype=tf.string)
            for idx in tf.range(bs):
                output_path = os.path.join(output_dir, f"{wav_name[idx]}.wav")
                tf.io.write_file(output_path, tf_wav[idx])
        else:
            tf_wav = encode_fn(gl_tf)
            tf.io.write_file(os.path.join(output_dir, f"{wav_name[0]}.wav"), tf_wav)

    @tf.function(
        input_signature=[
            tf.TensorSpec(shape=[None, None, None], dtype=tf.float32),
            tf.TensorSpec(shape=[], dtype=tf.int32),
        ]
    )
    def call(self, mel_spec, n_iter=32):
        """Apply GL algorithm to batched mel spectrograms.
        Args:
            mel_spec (tf.Tensor): normalized mel spectrogram.
            n_iter (int): number of iterations to run GL algorithm.
        Returns:
            (tf.Tensor): reconstructed signal from GL algorithm.
        """
        # De-normalize mel spectrogram (stored values are standardized log10).
        if self.normalized:
            mel_spec = tf.math.pow(
                10.0, mel_spec * self.scaler.scale_ + self.scaler.mean_
            )
        else:
            mel_spec = tf.math.pow(
                10.0, mel_spec
            )  # TODO @dathudeptrai check if its ok without it wavs were too quiet
        inverse_mel = tf.linalg.pinv(self.mel_basis)
        # [:, num_mels] @ [fft_size // 2 + 1, num_mels].T
        mel_to_linear = tf.linalg.matmul(mel_spec, inverse_mel, transpose_b=True)
        # Clamp to keep magnitudes strictly positive before the complex cast.
        mel_to_linear = tf.cast(tf.math.maximum(1e-10, mel_to_linear), tf.complex64)
        # Start GL from a random phase in [0, 2*pi).
        init_phase = tf.cast(
            tf.random.uniform(tf.shape(mel_to_linear), maxval=1), tf.complex64
        )
        phase = tf.math.exp(2j * np.pi * init_phase)
        # Iteratively project between time domain (ISTFT) and the unit-modulus
        # phase of the resulting STFT, keeping the known magnitude fixed.
        for _ in tf.range(n_iter):
            inverse = tf.signal.inverse_stft(
                mel_to_linear * phase,
                frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
                frame_step=self.ds_config["hop_size"],
                fft_length=self.ds_config["fft_size"],
                window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
            )
            phase = tf.signal.stft(
                inverse,
                self.ds_config["win_length"] or self.ds_config["fft_size"],
                self.ds_config["hop_size"],
                self.ds_config["fft_size"],
            )
            # Normalize to unit modulus; the max() guards against division by zero.
            phase /= tf.cast(tf.maximum(1e-10, tf.abs(phase)), tf.complex64)
        return tf.signal.inverse_stft(
            mel_to_linear * phase,
            frame_length=self.ds_config["win_length"] or self.ds_config["fft_size"],
            frame_step=self.ds_config["hop_size"],
            fft_length=self.ds_config["fft_size"],
            window_fn=tf.signal.inverse_stft_window_fn(self.ds_config["hop_size"]),
        )
| 6,824 | 39.868263 | 88 | py |
TensorFlowTTS | TensorFlowTTS-master/tensorflow_tts/utils/weight_norm.py | # -*- coding: utf-8 -*-
# Copyright 2019 The TensorFlow Probability Authors and Minh Nguyen (@dathudeptrai)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Weight Norm Modules."""
import warnings
import tensorflow as tf
class WeightNormalization(tf.keras.layers.Wrapper):
    """Layer wrapper to decouple magnitude and direction of the layer's weights.
    This wrapper reparameterizes a layer by decoupling the weight's
    magnitude and direction. This speeds up convergence by improving the
    conditioning of the optimization problem. It has an optional data-dependent
    initialization scheme, in which initial values of weights are set as functions
    of the first minibatch of data. Both the weight normalization and data-
    dependent initialization are described in [Salimans and Kingma (2016)][1].
    #### Example
    ```python
    net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'),
                     input_shape=(32, 32, 3), data_init=True)(x)
    net = WeightNorm(tf.keras.layers.Conv2DTranspose(16, 5, activation='relu'),
                     data_init=True)
    net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'),
                     data_init=True)(net)
    net = WeightNorm(tf.keras.layers.Dense(num_classes),
                     data_init=True)(net)
    ```
    #### References
    [1]: Tim Salimans and Diederik P. Kingma. Weight Normalization: A Simple
         Reparameterization to Accelerate Training of Deep Neural Networks. In
         _30th Conference on Neural Information Processing Systems_, 2016.
         https://arxiv.org/abs/1602.07868
    """
    def __init__(self, layer, data_init=True, **kwargs):
        """Initialize WeightNorm wrapper.
        Args:
            layer: A `tf.keras.layers.Layer` instance. Supported layer types are
                `Dense`, `Conv2D`, and `Conv2DTranspose`. Layers with multiple inputs
                are not supported.
            data_init: `bool`, if `True` use data dependent variable initialization.
            **kwargs: Additional keyword args passed to `tf.keras.layers.Wrapper`.
        Raises:
            ValueError: If `layer` is not a `tf.keras.layers.Layer` instance.
        """
        if not isinstance(layer, tf.keras.layers.Layer):
            raise ValueError(
                "Please initialize `WeightNorm` layer with a `tf.keras.layers.Layer` "
                "instance. You passed: {input}".format(input=layer)
            )
        layer_type = type(layer).__name__
        # Untested layer types are allowed but produce a warning, not an error.
        if layer_type not in [
            "Dense",
            "Conv2D",
            "Conv2DTranspose",
            "Conv1D",
            "GroupConv1D",
        ]:
            warnings.warn(
                "`WeightNorm` is tested only for `Dense`, `Conv2D`, `Conv1D`, `GroupConv1D`, "
                "`GroupConv2D`, and `Conv2DTranspose` layers. You passed a layer of type `{}`".format(
                    layer_type
                )
            )
        super().__init__(layer, **kwargs)
        self.data_init = data_init
        self._track_trackable(layer, name="layer")
        # Conv2DTranspose kernels keep output filters on axis -2 rather than
        # the last axis, so the per-filter axis differs for that layer type.
        self.filter_axis = -2 if layer_type == "Conv2DTranspose" else -1
    def _compute_weights(self):
        """Generate weights with normalization."""
        # Determine the axis along which to expand `g` so that `g` broadcasts to
        # the shape of `v`.
        # filter_axis == -1 -> new_axis == -2; filter_axis == -2 -> new_axis == -1:
        # a singleton dim is inserted so `g` (shape [filters]) lines up with the
        # kernel's filter axis during the multiply.
        new_axis = -self.filter_axis - 3
        # kernel = g * v / ||v||, normalized over all non-filter axes.
        self.layer.kernel = tf.nn.l2_normalize(
            self.v, axis=self.kernel_norm_axes
        ) * tf.expand_dims(self.g, new_axis)
    def _init_norm(self):
        """Set the norm of the weight vector."""
        # One L2 norm per output filter (reduces over all non-filter axes).
        kernel_norm = tf.sqrt(
            tf.reduce_sum(tf.square(self.v), axis=self.kernel_norm_axes)
        )
        self.g.assign(kernel_norm)
    def _data_dep_init(self, inputs):
        """Data dependent initialization."""
        # Normalize kernel first so that calling the layer calculates
        # `tf.dot(v, x)/tf.norm(v)` as in (5) in ([Salimans and Kingma, 2016][1]).
        self._compute_weights()
        # Temporarily disable the activation and zero the bias so the forward
        # pass below yields the raw pre-activation statistics.
        activation = self.layer.activation
        self.layer.activation = None
        use_bias = self.layer.bias is not None
        if use_bias:
            bias = self.layer.bias
            self.layer.bias = tf.zeros_like(bias)
        # Since the bias is initialized as zero, setting the activation to zero and
        # calling the initialized layer (with normalized kernel) yields the correct
        # computation ((5) in Salimans and Kingma (2016))
        x_init = self.layer(inputs)
        # Per-output-unit mean/variance over all but the last (channel) axis.
        norm_axes_out = list(range(x_init.shape.rank - 1))
        m_init, v_init = tf.nn.moments(x_init, norm_axes_out)
        scale_init = 1.0 / tf.sqrt(v_init + 1e-10)
        # Scale `g` so initial outputs have unit variance; shift bias so they
        # have zero mean. Then restore the original activation.
        self.g.assign(self.g * scale_init)
        if use_bias:
            self.layer.bias = bias
            self.layer.bias.assign(-m_init * scale_init)
        self.layer.activation = activation
    def build(self, input_shape=None):
        """Build `Layer`.
        Args:
            input_shape: The shape of the input to `self.layer`.
        Raises:
            ValueError: If `Layer` does not contain a `kernel` of weights
        """
        if not self.layer.built:
            self.layer.build(input_shape)
            if not hasattr(self.layer, "kernel"):
                raise ValueError(
                    "`WeightNorm` must wrap a layer that"
                    " contains a `kernel` for weights"
                )
            # Normalize over every axis except the filter axis.
            self.kernel_norm_axes = list(range(self.layer.kernel.shape.ndims))
            self.kernel_norm_axes.pop(self.filter_axis)
            # `v` takes ownership of the wrapped layer's kernel variable.
            self.v = self.layer.kernel
            # to avoid a duplicate `kernel` variable after `build` is called
            self.layer.kernel = None
            # `g` holds one trainable magnitude per output filter.
            self.g = self.add_weight(
                name="g",
                shape=(int(self.v.shape[self.filter_axis]),),
                initializer="ones",
                dtype=self.v.dtype,
                trainable=True,
            )
            # Non-trainable flag: initialization is deferred to the first call
            # (required for the data-dependent scheme, which needs real inputs).
            self.initialized = self.add_weight(
                name="initialized", dtype=tf.bool, trainable=False
            )
            self.initialized.assign(False)
        super().build()
    def call(self, inputs):
        """Call `Layer`."""
        # Lazily initialize `g` (and optionally bias) on the first forward pass.
        if not self.initialized:
            if self.data_init:
                self._data_dep_init(inputs)
            else:
                # initialize `g` as the norm of the initialized kernel
                self._init_norm()
            self.initialized.assign(True)
        # Recompute the effective kernel from (v, g) on every call.
        self._compute_weights()
        output = self.layer(inputs)
        return output
    def compute_output_shape(self, input_shape):
        # Output shape is unchanged by weight normalization; delegate to the
        # wrapped layer.
        return tf.TensorShape(self.layer.compute_output_shape(input_shape).as_list())
| 7,216 | 38.010811 | 102 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.